/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

/* current CPU in the current thread. It is only valid inside cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
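
/*
 * Added illustration (not part of the original source): assuming L2_BITS is
 * 10, TARGET_PAGE_BITS is 12 and L1_MAP_ADDR_SPACE_BITS is 32, the page
 * number has 32 - 12 = 20 bits, so V_L1_BITS_REM == 0 and V_L1_BITS == 10.
 * l1_map[] then has 1024 entries and V_L1_SHIFT == 10, i.e. each l1_map slot
 * points (through page_find_alloc() below) at a single leaf array of 1024
 * PageDesc entries covering 4 MB of guest address space.
 */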
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static PhysPageEntry phys_map;

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;

#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
#else
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
#endif

static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;

        freep = kinfo_getvmmap(getpid(), &cnt);
        for (i = 0; i < cnt; i++) {
            unsigned long startaddr, endaddr;

            startaddr = freep[i].kve_start;
            endaddr = freep[i].kve_end;
            if (h2g_valid(startaddr)) {
                startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                if (h2g_valid(endaddr)) {
                    endaddr = h2g(endaddr);
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
            unsigned long startaddr, endaddr;

            n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

            if (n == 2 && h2g_valid(startaddr)) {
                startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                if (h2g_valid(endaddr)) {
                    endaddr = h2g(endaddr);
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));

        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    PhysPageEntry *lp, *p;

    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == NULL) {
            lp->u.node = p = g_malloc0(sizeof(PhysPageEntry) * L2_SIZE);
                int first_index = index & ~(L2_SIZE - 1);
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf.phys_offset = io_mem_unassigned.ram_addr;
                    p[j].u.leaf.region_offset
                        = (first_index + j) << TARGET_PAGE_BITS;
        lp = &lp->u.node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));

static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        start = (void *)0x90000000UL;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        code_gen_buffer = g_malloc(code_gen_buffer_size);
        map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
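
/*
 * Added note: the usable buffer is deliberately smaller than the allocation.
 * code_gen_buffer_max_size keeps TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of
 * slack at the end so that the block which crosses the threshold can still
 * be finished before tb_flush() resets the buffer, and tbs[] is sized from
 * CODE_GEN_AVG_BLOCK_SIZE as an estimate of how many blocks fit.
 */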
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);

bool tcg_enabled(void)
    return code_gen_buffer != NULL;

void cpu_exec_init_all(void)
#if !defined(CONFIG_USER_ONLY)

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()

CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)

void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;

static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;

            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;

        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
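
/*
 * Added note: the TB lists walked above keep a slot number in the two low
 * bits of each stored TranslationBlock pointer (hence the "& ~3" before any
 * dereference); tb_link_page() below seeds jmp_first with (long)tb | 2, so
 * an entry that points back at the owning TB with tag 2 marks the end of
 * the circular jump list.
 */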
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;

static inline void set_bits(uint8_t *tab, int start, int len)
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
            mask &= ~(0xff << (end & 7));
        start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));

static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
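
/*
 * Added note: code_bitmap stores one bit per byte of the guest page
 * (TARGET_PAGE_SIZE / 8 bytes of storage), set for every byte covered by at
 * least one TB on the page.  tb_invalidate_phys_page_fast() below consults
 * it so that small writes which miss every translated range can skip the
 * full invalidate pass.
 */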
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    p = page_find(start >> TARGET_PAGE_BITS);
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
        tb_invalidate_phys_page_range(start, start + len, 1);
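
/*
 * Added note: this fast path is intended for the not-dirty RAM write slow
 * path, where 'len' is the access size (1, 2, 4 or 8 bytes).  Because start
 * must be len-aligned, the single code_bitmap byte shifted above always
 * covers the whole written range.
 */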
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
    TranslationBlock *tb;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    while (tb != NULL) {
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
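
/*
 * Added summary of the SMC protection scheme visible above: in user mode the
 * host page backing translated code is mprotect()ed read-only, so a guest
 * write faults and reaches page_unprotect() from the SEGV handler; in system
 * mode tlb_protect_code() clears the dirty flags instead, so writes take the
 * slow path where tb_invalidate_phys_page_fast()/range() can be invoked.
 */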
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
    TranslationBlock *tb1, *tb_next, **ptb;

    tb1 = tb->jmp_next[n];
        /* find head of list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            tb1 = tb1->jmp_next[n1];
        /* we are now sure now that tb jumps to tb1 */

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
            ptb = &tb1->jmp_next[n1];
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);

static void tb_reset_jump_recursive(TranslationBlock *tb)
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
    target_phys_addr_t addr;
    ram_addr_t ram_addr;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
    target_ulong len_mask = ~(len - 1);

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
    wp = g_malloc(sizeof(*wp));

    wp->len_mask = len_mask;

    /* keep all GDB-injected watchpoints in front */
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
    target_ulong len_mask = ~(len - 1);

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
#if defined(TARGET_HAS_ICE)
    bp = g_malloc(sizeof(*bp));

    /* keep all GDB-injected breakpoints in front */
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
            kvm_update_guest_debug(env, 0);
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
            perror(logfilename);
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
        setvbuf(logfile, NULL, _IOLBF, 0);
    if (!loglevel && logfile) {

void cpu_set_log_filename(const char *filename)
    logfilename = strdup(filename);
    cpu_set_log(loglevel);

static void cpu_unlink_tb(CPUState *env)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    spin_unlock(&interrupt_lock);

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        env->icount_decr.u16.high = 0xffff;
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
    env->interrupt_request |= mask;

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
    env->interrupt_request &= ~mask;

void cpu_exit(CPUState *env)
    env->exit_request = 1;

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "before eflags optimization and "
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },

static int cmp1(const char *s1, int n, const char *s2)
    if (strlen(s2) != n)
    return memcmp(s1, s2, n) == 0;

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
    const CPULogItem *item;

        p1 = strchr(p, ',');
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))

void cpu_abort(CPUState *env, const char *fmt, ...)
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
    cpu_dump_state(env, stderr, fprintf, 0);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
        log_cpu_state(env, 0);
#if defined(CONFIG_USER_ONLY)
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);

CPUState *cpu_copy(CPUState *env)
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

static CPUTLBEntry s_cputlb_empty_entry = {

/*
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;

void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
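
/*
 * Added note: clearing the dirty bits and re-tagging matching TLB entries
 * with TLB_NOTDIRTY forces subsequent guest stores to that page onto the
 * slow I/O path, where the dirty flags can be set again and, for pages
 * holding translated code, the affected TBs can be invalidated first.
 */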
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
    unsigned long length, start1;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],

int cpu_physical_memory_set_dirty_tracking(int enable)
    in_migration = enable;

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;

static bool is_ram_rom(ram_addr_t pd)
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;

static bool is_romd(ram_addr_t pd)
    pd &= ~TARGET_PAGE_MASK;
    mr = io_mem_region[pd];
    return mr->rom_device && mr->readable;

static bool is_ram_rom_romd(ram_addr_t pd)
    return is_ram_rom(pd) || is_romd(pd);

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
            iotlb |= io_mem_rom.ram_addr;
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
            te->addr_write = address;
        te->addr_write = -1;
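
/*
 * Added note: the guest addresses stored in addr_read/addr_write/addr_code
 * are page aligned, so the low bits are reused for the flags (TLB_MMIO,
 * TLB_NOTDIRTY, TLB_INVALID_MASK) set above; any set flag bit makes the
 * inline TLB comparison fail and diverts the access to the slow path.
 */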

void tlb_flush(CPUState *env, int flush_global)

void tlb_flush_page(CPUState *env, target_ulong addr)

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
    walk_memory_regions_fn fn;
    unsigned long start;

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
        return walk_memory_regions_end(data, base, 0);

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
    struct walk_memory_regions_data data;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

    return walk_memory_regions_end(&data, 0, 0);

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

/* dump memory mappings */
void page_dump(FILE *f)
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);

int page_get_flags(target_ulong address)
    p = page_find(address >> TARGET_PAGE_BITS);

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            tb_invalidate_phys_page(addr, 0, NULL);

int page_check_range(target_ulong start, target_ulong len, int flags)
    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));

    if (start + len - 1 < start) {
        /* We've wrapped around.  */

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !(p->flags & PAGE_VALID) )
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler. However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);

        mprotect((void *)g2h(host_start), qemu_host_page_size,

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)

#endif /* defined(CONFIG_USER_ONLY) */
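
/*
 * Illustrative sketch, not part of the original file: a typical caller of
 * page_check_range() validates a guest buffer before touching it, e.g. in
 * the user-mode syscall layer.  The wrapper name "guest_range_writable" is
 * hypothetical, and the return convention (0 on success) is assumed here.
 */
#if 0 /* example only */
static bool guest_range_writable(abi_ulong guest_addr, abi_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) == 0;
}
#endif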
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
        if (addr > start_addr)                                            \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;                 \
            if (start_addr2 > 0)                                          \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)          \
            end_addr2 = TARGET_PAGE_SIZE - 1;                             \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                         \
2512 static void destroy_page_desc(PhysPageDesc pd
)
2514 unsigned io_index
= pd
.phys_offset
& ~TARGET_PAGE_MASK
;
2515 MemoryRegion
*mr
= io_mem_region
[io_index
];
2518 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2519 memory_region_destroy(&subpage
->iomem
);
2524 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2527 PhysPageEntry
*p
= lp
->u
.node
;
2533 for (i
= 0; i
< L2_SIZE
; ++i
) {
2535 destroy_l2_mapping(&p
[i
], level
- 1);
2537 destroy_page_desc(p
[i
].u
.leaf
);
2544 static void destroy_all_mappings(void)
2546 destroy_l2_mapping(&phys_map
, P_L2_LEVELS
- 1);
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page. The address used when calling the IO function is
   the offset from the start of the region, plus region_offset. Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset. This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
2557 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2560 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2561 ram_addr_t size
= section
->size
;
2562 ram_addr_t phys_offset
= section
->mr
->ram_addr
;
2563 ram_addr_t region_offset
= section
->offset_within_region
;
2564 target_phys_addr_t addr
, end_addr
;
2567 ram_addr_t orig_size
= size
;
2570 if (memory_region_is_ram(section
->mr
)) {
2571 phys_offset
+= region_offset
;
2576 phys_offset
|= io_mem_rom
.ram_addr
;
2581 if (phys_offset
== io_mem_unassigned
.ram_addr
) {
2582 region_offset
= start_addr
;
2584 region_offset
&= TARGET_PAGE_MASK
;
2585 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2586 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2590 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 0);
2591 if (p
&& p
->phys_offset
!= io_mem_unassigned
.ram_addr
) {
2592 ram_addr_t orig_memory
= p
->phys_offset
;
2593 target_phys_addr_t start_addr2
, end_addr2
;
2594 int need_subpage
= 0;
2595 MemoryRegion
*mr
= io_mem_region
[orig_memory
& ~TARGET_PAGE_MASK
];
2597 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2600 if (!(mr
->subpage
)) {
2601 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2602 &p
->phys_offset
, orig_memory
,
2605 subpage
= container_of(mr
, subpage_t
, iomem
);
2607 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2609 p
->region_offset
= 0;
2611 p
->phys_offset
= phys_offset
;
2612 p
->region_offset
= region_offset
;
2613 if (is_ram_rom_romd(phys_offset
))
2614 phys_offset
+= TARGET_PAGE_SIZE
;
2617 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2618 p
->phys_offset
= phys_offset
;
2619 p
->region_offset
= region_offset
;
2620 if (is_ram_rom_romd(phys_offset
)) {
2621 phys_offset
+= TARGET_PAGE_SIZE
;
2623 target_phys_addr_t start_addr2
, end_addr2
;
2624 int need_subpage
= 0;
2626 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2627 end_addr2
, need_subpage
);
2630 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2632 io_mem_unassigned
.ram_addr
,
2633 addr
& TARGET_PAGE_MASK
);
2634 subpage_register(subpage
, start_addr2
, end_addr2
,
2635 phys_offset
, region_offset
);
2636 p
->region_offset
= 0;
2640 region_offset
+= TARGET_PAGE_SIZE
;
2641 addr
+= TARGET_PAGE_SIZE
;
2642 } while (addr
!= end_addr
);
2644 /* since each CPU stores ram addresses in its TLB cache, we must
2645 reset the modified entries */
2647 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
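
/*
 * Illustrative sketch, not part of the original file: the address an IO
 * callback sees after registration, per the comment above
 * cpu_register_physical_memory_log().  Numbers are made up and a 4 KiB
 * TARGET_PAGE_SIZE is assumed:
 *
 *   start_addr    = 0x10000200  -> rounded down to 0x10000000
 *   region_offset = 0x00000200  -> rounded down to 0x00000000
 *   guest access  = 0x10000204
 *   callback gets: (0x10000204 - 0x10000000) + 0x00000000 = 0x204
 *
 * This only matches expectations when start_addr and region_offset share
 * their low bits, which is exactly the caveat stated in that comment.
 */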
2652 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2655 kvm_coalesce_mmio_region(addr
, size
);
2658 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2661 kvm_uncoalesce_mmio_region(addr
, size
);
2664 void qemu_flush_coalesced_mmio_buffer(void)
2667 kvm_flush_coalesced_mmio_buffer();
2670 #if defined(__linux__) && !defined(TARGET_S390X)
2672 #include <sys/vfs.h>
2674 #define HUGETLBFS_MAGIC 0x958458f6
2676 static long gethugepagesize(const char *path
)
2682 ret
= statfs(path
, &fs
);
2683 } while (ret
!= 0 && errno
== EINTR
);
2690 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2691 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2696 static void *file_ram_alloc(RAMBlock
*block
,
2706 unsigned long hpagesize
;
2708 hpagesize
= gethugepagesize(path
);
2713 if (memory
< hpagesize
) {
2717 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2718 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2722 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2726 fd
= mkstemp(filename
);
2728 perror("unable to create backing store for hugepages");
2735 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2738 * ftruncate is not supported by hugetlbfs in older
2739 * hosts, so don't bother bailing out on errors.
2740 * If anything goes wrong with it under other filesystems,
2743 if (ftruncate(fd
, memory
))
2744 perror("ftruncate");
2747 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2748 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2749 * to sidestep this quirk.
2751 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2752 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2754 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2756 if (area
== MAP_FAILED
) {
2757 perror("file_ram_alloc: can't mmap RAM pages");
2766 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2768 RAMBlock
*block
, *next_block
;
2769 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2771 if (QLIST_EMPTY(&ram_list
.blocks
))
2774 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2775 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2777 end
= block
->offset
+ block
->length
;
2779 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2780 if (next_block
->offset
>= end
) {
2781 next
= MIN(next
, next_block
->offset
);
2784 if (next
- end
>= size
&& next
- end
< mingap
) {
2786 mingap
= next
- end
;
2790 if (offset
== RAM_ADDR_MAX
) {
2791 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2799 static ram_addr_t
last_ram_offset(void)
2802 ram_addr_t last
= 0;
2804 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2805 last
= MAX(last
, block
->offset
+ block
->length
);
2810 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2812 RAMBlock
*new_block
, *block
;
2815 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2816 if (block
->offset
== addr
) {
2822 assert(!new_block
->idstr
[0]);
2824 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2825 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2827 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2831 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2833 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2834 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2835 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2842 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2845 RAMBlock
*new_block
;
2847 size
= TARGET_PAGE_ALIGN(size
);
2848 new_block
= g_malloc0(sizeof(*new_block
));
2851 new_block
->offset
= find_ram_offset(size
);
2853 new_block
->host
= host
;
2854 new_block
->flags
|= RAM_PREALLOC_MASK
;
2857 #if defined (__linux__) && !defined(TARGET_S390X)
2858 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2859 if (!new_block
->host
) {
2860 new_block
->host
= qemu_vmalloc(size
);
2861 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2864 fprintf(stderr
, "-mem-path option unsupported\n");
2868 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2869 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2870 an system defined value, which is at least 256GB. Larger systems
2871 have larger values. We put the guest between the end of data
2872 segment (system break) and this value. We use 32GB as a base to
2873 have enough room for the system break to grow. */
2874 new_block
->host
= mmap((void*)0x800000000, size
,
2875 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2876 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2877 if (new_block
->host
== MAP_FAILED
) {
2878 fprintf(stderr
, "Allocating RAM failed\n");
2882 if (xen_enabled()) {
2883 xen_ram_alloc(new_block
->offset
, size
, mr
);
2885 new_block
->host
= qemu_vmalloc(size
);
2888 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2891 new_block
->length
= size
;
2893 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2895 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2896 last_ram_offset() >> TARGET_PAGE_BITS
);
2897 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2898 0xff, size
>> TARGET_PAGE_BITS
);
2901 kvm_setup_guest_memory(new_block
->host
, size
);
2903 return new_block
->offset
;
2906 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2908 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2911 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2915 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2916 if (addr
== block
->offset
) {
2917 QLIST_REMOVE(block
, next
);
2924 void qemu_ram_free(ram_addr_t addr
)
2928 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2929 if (addr
== block
->offset
) {
2930 QLIST_REMOVE(block
, next
);
2931 if (block
->flags
& RAM_PREALLOC_MASK
) {
2933 } else if (mem_path
) {
2934 #if defined (__linux__) && !defined(TARGET_S390X)
2936 munmap(block
->host
, block
->length
);
2939 qemu_vfree(block
->host
);
2945 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2946 munmap(block
->host
, block
->length
);
2948 if (xen_enabled()) {
2949 xen_invalidate_map_cache_entry(block
->host
);
2951 qemu_vfree(block
->host
);
2963 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2970 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2971 offset
= addr
- block
->offset
;
2972 if (offset
< block
->length
) {
2973 vaddr
= block
->host
+ offset
;
2974 if (block
->flags
& RAM_PREALLOC_MASK
) {
2978 munmap(vaddr
, length
);
2980 #if defined(__linux__) && !defined(TARGET_S390X)
2983 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2986 flags
|= MAP_PRIVATE
;
2988 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2989 flags
, block
->fd
, offset
);
2991 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2992 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2999 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3000 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
3001 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
3004 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
3005 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3009 if (area
!= vaddr
) {
3010 fprintf(stderr
, "Could not remap addr: "
3011 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
3015 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
3021 #endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    xen_map_cache(block->offset, block->length, 1);
            return block->host + (addr - block->offset);

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
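
/*
 * Illustrative sketch, not part of the original file: the intended use of
 * qemu_get_ram_ptr() per the comment above, i.e. a device touching RAM it
 * allocated itself.  The struct and field names below are hypothetical.
 */
#if 0 /* example only */
struct my_fb_dev {                 /* hypothetical device state */
    ram_addr_t vram_offset;        /* returned earlier by qemu_ram_alloc() */
    size_t vram_size;
};

static void my_fb_clear(struct my_fb_dev *s)
{
    uint8_t *p = qemu_get_ram_ptr(s->vram_offset);

    memset(p, 0, s->vram_size);    /* stays inside the block it allocated */
    qemu_put_ram_ptr(p);           /* drop the (Xen) mapping reference */
}
#endif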
3064 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3065 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3067 void *qemu_safe_ram_ptr(ram_addr_t addr
)
3071 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3072 if (addr
- block
->offset
< block
->length
) {
3073 if (xen_enabled()) {
3074 /* We need to check if the requested address is in the RAM
3075 * because we don't want to map the entire memory in QEMU.
3076 * In that case just map until the end of the page.
3078 if (block
->offset
== 0) {
3079 return xen_map_cache(addr
, 0, 0);
3080 } else if (block
->host
== NULL
) {
3082 xen_map_cache(block
->offset
, block
->length
, 1);
3085 return block
->host
+ (addr
- block
->offset
);
3089 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3095 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3096 * but takes a size argument */
3097 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
3102 if (xen_enabled()) {
3103 return xen_map_cache(addr
, *size
, 1);
3107 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3108 if (addr
- block
->offset
< block
->length
) {
3109 if (addr
- block
->offset
+ *size
> block
->length
)
3110 *size
= block
->length
- addr
+ block
->offset
;
3111 return block
->host
+ (addr
- block
->offset
);
3115 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3120 void qemu_put_ram_ptr(void *addr
)
3122 trace_qemu_put_ram_ptr(addr
);
3125 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
3128 uint8_t *host
= ptr
;
3130 if (xen_enabled()) {
3131 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
3135 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3136 /* This case append when the block is not mapped. */
3137 if (block
->host
== NULL
) {
3140 if (host
- block
->host
< block
->length
) {
3141 *ram_addr
= block
->offset
+ (host
- block
->host
);
3149 /* Some of the softmmu routines need to translate from a host pointer
3150 (typically a TLB entry) back to a ram offset. */
3151 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
3153 ram_addr_t ram_addr
;
3155 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
3156 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
3162 static uint64_t unassigned_mem_read(void *opaque
, target_phys_addr_t addr
,
3165 #ifdef DEBUG_UNASSIGNED
3166 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3168 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3169 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
3174 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
3175 uint64_t val
, unsigned size
)
3177 #ifdef DEBUG_UNASSIGNED
3178 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
3180 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3181 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
3185 static const MemoryRegionOps unassigned_mem_ops
= {
3186 .read
= unassigned_mem_read
,
3187 .write
= unassigned_mem_write
,
3188 .endianness
= DEVICE_NATIVE_ENDIAN
,
3191 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
3197 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
3198 uint64_t value
, unsigned size
)
3203 static const MemoryRegionOps error_mem_ops
= {
3204 .read
= error_mem_read
,
3205 .write
= error_mem_write
,
3206 .endianness
= DEVICE_NATIVE_ENDIAN
,
3209 static const MemoryRegionOps rom_mem_ops
= {
3210 .read
= error_mem_read
,
3211 .write
= unassigned_mem_write
,
3212 .endianness
= DEVICE_NATIVE_ENDIAN
,
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);

        stb_p(qemu_get_ram_ptr(ram_addr), val);
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        stl_p(qemu_get_ram_ptr(ram_addr), val);

    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
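
/*
 * Illustrative sketch, not part of the original file: what the dirty-flag
 * arithmetic in notdirty_mem_write() does.  With CODE_DIRTY_FLAG being one
 * bit inside the 0xff mask, the write marks every dirty bit *except* the
 * code bit:
 *
 *   dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
 *
 * Only once the translated code for the page has been flushed (so the code
 * bit is set too and dirty_flags == 0xff) is the slow notdirty callback
 * removed again via tlb_set_dirty().
 */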
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
            wp->flags &= ~BP_WATCHPOINT_HIT;
3298 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3299 so these check for a hit then pass through to the normal out-of-line
3301 static uint64_t watch_mem_read(void *opaque
, target_phys_addr_t addr
,
3304 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
3306 case 1: return ldub_phys(addr
);
3307 case 2: return lduw_phys(addr
);
3308 case 4: return ldl_phys(addr
);
3313 static void watch_mem_write(void *opaque
, target_phys_addr_t addr
,
3314 uint64_t val
, unsigned size
)
3316 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
3318 case 1: stb_phys(addr
, val
);
3319 case 2: stw_phys(addr
, val
);
3320 case 4: stl_phys(addr
, val
);
3325 static const MemoryRegionOps watch_mem_ops
= {
3326 .read
= watch_mem_read
,
3327 .write
= watch_mem_write
,
3328 .endianness
= DEVICE_NATIVE_ENDIAN
,
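
/*
 * Illustrative sketch, not part of the original file: the len_mask passed to
 * check_watchpoint() by the accessors above.  For an access of "size" bytes,
 * ~(size - 1) is an alignment mask for the access width:
 *
 *   size = 1  ->  len_mask = ~0x0
 *   size = 2  ->  len_mask = ~0x1
 *   size = 4  ->  len_mask = ~0x3
 *
 * so, via the first clause of the comparison in check_watchpoint(), a 4-byte
 * read at 0x1000 also hits a byte watchpoint registered at 0x1003
 * (0x1003 & ~0x3 == 0x1000).
 */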
3331 static uint64_t subpage_read(void *opaque
, target_phys_addr_t addr
,
3334 subpage_t
*mmio
= opaque
;
3335 unsigned int idx
= SUBPAGE_IDX(addr
);
3336 #if defined(DEBUG_SUBPAGE)
3337 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3338 mmio
, len
, addr
, idx
);
3341 addr
+= mmio
->region_offset
[idx
];
3342 idx
= mmio
->sub_io_index
[idx
];
3343 return io_mem_read(idx
, addr
, len
);
3346 static void subpage_write(void *opaque
, target_phys_addr_t addr
,
3347 uint64_t value
, unsigned len
)
3349 subpage_t
*mmio
= opaque
;
3350 unsigned int idx
= SUBPAGE_IDX(addr
);
3351 #if defined(DEBUG_SUBPAGE)
3352 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3353 " idx %d value %"PRIx64
"\n",
3354 __func__
, mmio
, len
, addr
, idx
, value
);
3357 addr
+= mmio
->region_offset
[idx
];
3358 idx
= mmio
->sub_io_index
[idx
];
3359 io_mem_write(idx
, addr
, value
, len
);
3362 static const MemoryRegionOps subpage_ops
= {
3363 .read
= subpage_read
,
3364 .write
= subpage_write
,
3365 .endianness
= DEVICE_NATIVE_ENDIAN
,
3368 static uint64_t subpage_ram_read(void *opaque
, target_phys_addr_t addr
,
3371 ram_addr_t raddr
= addr
;
3372 void *ptr
= qemu_get_ram_ptr(raddr
);
3374 case 1: return ldub_p(ptr
);
3375 case 2: return lduw_p(ptr
);
3376 case 4: return ldl_p(ptr
);
3381 static void subpage_ram_write(void *opaque
, target_phys_addr_t addr
,
3382 uint64_t value
, unsigned size
)
3384 ram_addr_t raddr
= addr
;
3385 void *ptr
= qemu_get_ram_ptr(raddr
);
3387 case 1: return stb_p(ptr
, value
);
3388 case 2: return stw_p(ptr
, value
);
3389 case 4: return stl_p(ptr
, value
);
3394 static const MemoryRegionOps subpage_ram_ops
= {
3395 .read
= subpage_ram_read
,
3396 .write
= subpage_ram_write
,
3397 .endianness
= DEVICE_NATIVE_ENDIAN
,
3400 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3401 ram_addr_t memory
, ram_addr_t region_offset
)
3405 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3407 idx
= SUBPAGE_IDX(start
);
3408 eidx
= SUBPAGE_IDX(end
);
3409 #if defined(DEBUG_SUBPAGE)
3410 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3411 mmio
, start
, end
, idx
, eidx
, memory
);
3413 if ((memory
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
3414 memory
= io_mem_subpage_ram
.ram_addr
;
3416 memory
&= IO_MEM_NB_ENTRIES
- 1;
3417 for (; idx
<= eidx
; idx
++) {
3418 mmio
->sub_io_index
[idx
] = memory
;
3419 mmio
->region_offset
[idx
] = region_offset
;
3425 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3426 ram_addr_t orig_memory
,
3427 ram_addr_t region_offset
)
3432 mmio
= g_malloc0(sizeof(subpage_t
));
3435 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3436 "subpage", TARGET_PAGE_SIZE
);
3437 mmio
->iomem
.subpage
= true;
3438 subpage_memory
= mmio
->iomem
.ram_addr
;
3439 #if defined(DEBUG_SUBPAGE)
3440 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3441 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3443 *phys
= subpage_memory
;
3444 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
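
/*
 * Illustrative sketch, not part of the original file: how a subpage lookup
 * resolves, assuming a 4 KiB target page.  SUBPAGE_IDX() keeps only the
 * offset inside the page:
 *
 *   access at offset 0x2a4 of the page  ->  idx = 0x2a4
 *   io index  = mmio->sub_io_index[0x2a4]
 *   io offset = 0x2a4 + mmio->region_offset[0x2a4]
 *
 * i.e. each byte of the page can be routed to a different registered region,
 * which is what subpage_register() fills in for the [start, end] range.
 */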
3449 static int get_free_io_mem_idx(void)
3453 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
3454 if (!io_mem_used
[i
]) {
3458 fprintf(stderr
, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES
);
3462 /* mem_read and mem_write are arrays of functions containing the
3463 function to access byte (index 0), word (index 1) and dword (index
3464 2). Functions can be omitted with a NULL function pointer.
3465 If io_index is non zero, the corresponding io zone is
3466 modified. If it is zero, a new io zone is allocated. The return
3467 value can be used with cpu_register_physical_memory(). (-1) is
3468 returned if error. */
3469 static int cpu_register_io_memory_fixed(int io_index
, MemoryRegion
*mr
)
3471 if (io_index
<= 0) {
3472 io_index
= get_free_io_mem_idx();
3476 if (io_index
>= IO_MEM_NB_ENTRIES
)
3480 io_mem_region
[io_index
] = mr
;
3485 int cpu_register_io_memory(MemoryRegion
*mr
)
3487 return cpu_register_io_memory_fixed(0, mr
);
3490 void cpu_unregister_io_memory(int io_index
)
3492 io_mem_region
[io_index
] = NULL
;
3493 io_mem_used
[io_index
] = 0;
3496 static void io_mem_init(void)
3500 /* Must be first: */
3501 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3502 assert(io_mem_ram
.ram_addr
== 0);
3503 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3504 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3505 "unassigned", UINT64_MAX
);
3506 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3507 "notdirty", UINT64_MAX
);
3508 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3509 "subpage-ram", UINT64_MAX
);
3513 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3514 "watch", UINT64_MAX
);
3517 static void core_begin(MemoryListener
*listener
)
3519 destroy_all_mappings();
3522 static void core_commit(MemoryListener
*listener
)
3526 static void core_region_add(MemoryListener
*listener
,
3527 MemoryRegionSection
*section
)
3529 cpu_register_physical_memory_log(section
, section
->readonly
);
3532 static void core_region_del(MemoryListener
*listener
,
3533 MemoryRegionSection
*section
)
3537 static void core_region_nop(MemoryListener
*listener
,
3538 MemoryRegionSection
*section
)
3540 cpu_register_physical_memory_log(section
, section
->readonly
);
3543 static void core_log_start(MemoryListener
*listener
,
3544 MemoryRegionSection
*section
)
3548 static void core_log_stop(MemoryListener
*listener
,
3549 MemoryRegionSection
*section
)
3553 static void core_log_sync(MemoryListener
*listener
,
3554 MemoryRegionSection
*section
)
3558 static void core_log_global_start(MemoryListener
*listener
)
3560 cpu_physical_memory_set_dirty_tracking(1);
3563 static void core_log_global_stop(MemoryListener
*listener
)
3565 cpu_physical_memory_set_dirty_tracking(0);
3568 static void core_eventfd_add(MemoryListener
*listener
,
3569 MemoryRegionSection
*section
,
3570 bool match_data
, uint64_t data
, int fd
)
3574 static void core_eventfd_del(MemoryListener
*listener
,
3575 MemoryRegionSection
*section
,
3576 bool match_data
, uint64_t data
, int fd
)
3580 static void io_begin(MemoryListener
*listener
)
3584 static void io_commit(MemoryListener
*listener
)
3588 static void io_region_add(MemoryListener
*listener
,
3589 MemoryRegionSection
*section
)
3591 iorange_init(§ion
->mr
->iorange
, &memory_region_iorange_ops
,
3592 section
->offset_within_address_space
, section
->size
);
3593 ioport_register(§ion
->mr
->iorange
);
3596 static void io_region_del(MemoryListener
*listener
,
3597 MemoryRegionSection
*section
)
3599 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
3602 static void io_region_nop(MemoryListener
*listener
,
3603 MemoryRegionSection
*section
)
3607 static void io_log_start(MemoryListener
*listener
,
3608 MemoryRegionSection
*section
)
3612 static void io_log_stop(MemoryListener
*listener
,
3613 MemoryRegionSection
*section
)
3617 static void io_log_sync(MemoryListener
*listener
,
3618 MemoryRegionSection
*section
)
3622 static void io_log_global_start(MemoryListener
*listener
)
3626 static void io_log_global_stop(MemoryListener
*listener
)
3630 static void io_eventfd_add(MemoryListener
*listener
,
3631 MemoryRegionSection
*section
,
3632 bool match_data
, uint64_t data
, int fd
)
3636 static void io_eventfd_del(MemoryListener
*listener
,
3637 MemoryRegionSection
*section
,
3638 bool match_data
, uint64_t data
, int fd
)
3642 static MemoryListener core_memory_listener
= {
3643 .begin
= core_begin
,
3644 .commit
= core_commit
,
3645 .region_add
= core_region_add
,
3646 .region_del
= core_region_del
,
3647 .region_nop
= core_region_nop
,
3648 .log_start
= core_log_start
,
3649 .log_stop
= core_log_stop
,
3650 .log_sync
= core_log_sync
,
3651 .log_global_start
= core_log_global_start
,
3652 .log_global_stop
= core_log_global_stop
,
3653 .eventfd_add
= core_eventfd_add
,
3654 .eventfd_del
= core_eventfd_del
,
3658 static MemoryListener io_memory_listener
= {
3660 .commit
= io_commit
,
3661 .region_add
= io_region_add
,
3662 .region_del
= io_region_del
,
3663 .region_nop
= io_region_nop
,
3664 .log_start
= io_log_start
,
3665 .log_stop
= io_log_stop
,
3666 .log_sync
= io_log_sync
,
3667 .log_global_start
= io_log_global_start
,
3668 .log_global_stop
= io_log_global_stop
,
3669 .eventfd_add
= io_eventfd_add
,
3670 .eventfd_del
= io_eventfd_del
,
static void memory_map_init(void)
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);

MemoryRegion *get_system_memory(void)
    return system_memory;

MemoryRegion *get_system_io(void)

#endif /* !defined(CONFIG_USER_ONLY) */
3700 /* physical memory access (slow version, mainly for debug) */
3701 #if defined(CONFIG_USER_ONLY)
3702 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3703 uint8_t *buf
, int len
, int is_write
)
3710 page
= addr
& TARGET_PAGE_MASK
;
3711 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3714 flags
= page_get_flags(page
);
3715 if (!(flags
& PAGE_VALID
))
3718 if (!(flags
& PAGE_WRITE
))
3720 /* XXX: this code should not depend on lock_user */
3721 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3724 unlock_user(p
, addr
, l
);
3726 if (!(flags
& PAGE_READ
))
3728 /* XXX: this code should not depend on lock_user */
3729 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3732 unlock_user(p
, addr
, 0);
3742 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3743 int len
, int is_write
)
3748 target_phys_addr_t page
;
3753 page
= addr
& TARGET_PAGE_MASK
;
3754 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3757 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3761 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3762 target_phys_addr_t addr1
;
3763 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3764 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3765 /* XXX: could force cpu_single_env to NULL to avoid
3767 if (l
>= 4 && ((addr1
& 3) == 0)) {
3768 /* 32 bit write access */
3770 io_mem_write(io_index
, addr1
, val
, 4);
3772 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3773 /* 16 bit write access */
3775 io_mem_write(io_index
, addr1
, val
, 2);
3778 /* 8 bit write access */
3780 io_mem_write(io_index
, addr1
, val
, 1);
3785 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3787 ptr
= qemu_get_ram_ptr(addr1
);
3788 memcpy(ptr
, buf
, l
);
3789 if (!cpu_physical_memory_is_dirty(addr1
)) {
3790 /* invalidate code */
3791 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3793 cpu_physical_memory_set_dirty_flags(
3794 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3796 qemu_put_ram_ptr(ptr
);
3799 if (!is_ram_rom_romd(pd
)) {
3800 target_phys_addr_t addr1
;
3802 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
3803 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3804 if (l
>= 4 && ((addr1
& 3) == 0)) {
3805 /* 32 bit read access */
3806 val
= io_mem_read(io_index
, addr1
, 4);
3809 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3810 /* 16 bit read access */
3811 val
= io_mem_read(io_index
, addr1
, 2);
3815 /* 8 bit read access */
3816 val
= io_mem_read(io_index
, addr1
, 1);
3822 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
3823 memcpy(buf
, ptr
+ (addr
& ~TARGET_PAGE_MASK
), l
);
3824 qemu_put_ram_ptr(ptr
);
3833 /* used for ROM loading : can write in RAM and ROM */
3834 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3835 const uint8_t *buf
, int len
)
3839 target_phys_addr_t page
;
3844 page
= addr
& TARGET_PAGE_MASK
;
3845 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3848 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3851 if (!is_ram_rom_romd(pd
)) {
3854 unsigned long addr1
;
3855 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3857 ptr
= qemu_get_ram_ptr(addr1
);
3858 memcpy(ptr
, buf
, l
);
3859 qemu_put_ram_ptr(ptr
);
3869 target_phys_addr_t addr
;
3870 target_phys_addr_t len
;
3873 static BounceBuffer bounce
;
3875 typedef struct MapClient
{
3877 void (*callback
)(void *opaque
);
3878 QLIST_ENTRY(MapClient
) link
;
3881 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3882 = QLIST_HEAD_INITIALIZER(map_client_list
);
3884 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3886 MapClient
*client
= g_malloc(sizeof(*client
));
3888 client
->opaque
= opaque
;
3889 client
->callback
= callback
;
3890 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3894 void cpu_unregister_map_client(void *_client
)
3896 MapClient
*client
= (MapClient
*)_client
;
3898 QLIST_REMOVE(client
, link
);
3902 static void cpu_notify_map_clients(void)
3906 while (!QLIST_EMPTY(&map_client_list
)) {
3907 client
= QLIST_FIRST(&map_client_list
);
3908 client
->callback(client
->opaque
);
3909 cpu_unregister_map_client(client
);
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    target_phys_addr_t page;
    ram_addr_t raddr = RAM_ADDR_MAX;

        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        p = phys_page_find(page >> TARGET_PAGE_BITS);

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
                cpu_physical_memory_read(addr, bounce.buffer, l);
            return bounce.buffer;

        raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);

    ret = qemu_ram_ptr_length(raddr, &rlen);
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
    if (buffer != bounce.buffer) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                l = TARGET_PAGE_SIZE;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);

    cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
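
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * the two calls above for a one-direction transfer.  The helper name and the
 * "desc" source buffer are hypothetical, and the trailing is_write argument
 * of cpu_physical_memory_map() is assumed from the unmap counterpart.  Note
 * that *plen may come back smaller than requested, so a real caller loops.
 */
#if 0 /* example only */
static void copy_to_guest(target_phys_addr_t gpa, const void *desc,
                          target_phys_addr_t size)
{
    while (size) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

        if (!host) {
            break;  /* resources exhausted; see cpu_register_map_client() */
        }
        memcpy(host, desc, plen);
        cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
        gpa += plen;
        desc = (const uint8_t *)desc + plen;
        size -= plen;
    }
}
#endif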
4009 /* warning: addr must be aligned */
4010 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
4011 enum device_endian endian
)
4019 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4022 if (!is_ram_rom_romd(pd
)) {
4024 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4025 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4026 val
= io_mem_read(io_index
, addr
, 4);
4027 #if defined(TARGET_WORDS_BIGENDIAN)
4028 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4032 if (endian
== DEVICE_BIG_ENDIAN
) {
4038 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4039 (addr
& ~TARGET_PAGE_MASK
);
4041 case DEVICE_LITTLE_ENDIAN
:
4042 val
= ldl_le_p(ptr
);
4044 case DEVICE_BIG_ENDIAN
:
4045 val
= ldl_be_p(ptr
);
4055 uint32_t ldl_phys(target_phys_addr_t addr
)
4057 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4060 uint32_t ldl_le_phys(target_phys_addr_t addr
)
4062 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4065 uint32_t ldl_be_phys(target_phys_addr_t addr
)
4067 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4070 /* warning: addr must be aligned */
4071 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
4072 enum device_endian endian
)
4080 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4083 if (!is_ram_rom_romd(pd
)) {
4085 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4086 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4088 /* XXX This is broken when device endian != cpu endian.
4089 Fix and add "endian" variable check */
4090 #ifdef TARGET_WORDS_BIGENDIAN
4091 val
= io_mem_read(io_index
, addr
, 4) << 32;
4092 val
|= io_mem_read(io_index
, addr
+ 4, 4);
4094 val
= io_mem_read(io_index
, addr
, 4);
4095 val
|= io_mem_read(io_index
, addr
+ 4, 4) << 32;
4099 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4100 (addr
& ~TARGET_PAGE_MASK
);
4102 case DEVICE_LITTLE_ENDIAN
:
4103 val
= ldq_le_p(ptr
);
4105 case DEVICE_BIG_ENDIAN
:
4106 val
= ldq_be_p(ptr
);
4116 uint64_t ldq_phys(target_phys_addr_t addr
)
4118 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4121 uint64_t ldq_le_phys(target_phys_addr_t addr
)
4123 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4126 uint64_t ldq_be_phys(target_phys_addr_t addr
)
4128 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4132 uint32_t ldub_phys(target_phys_addr_t addr
)
4135 cpu_physical_memory_read(addr
, &val
, 1);
4139 /* warning: addr must be aligned */
4140 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
4141 enum device_endian endian
)
4149 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4152 if (!is_ram_rom_romd(pd
)) {
4154 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4155 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4156 val
= io_mem_read(io_index
, addr
, 2);
4157 #if defined(TARGET_WORDS_BIGENDIAN)
4158 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4162 if (endian
== DEVICE_BIG_ENDIAN
) {
4168 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4169 (addr
& ~TARGET_PAGE_MASK
);
4171 case DEVICE_LITTLE_ENDIAN
:
4172 val
= lduw_le_p(ptr
);
4174 case DEVICE_BIG_ENDIAN
:
4175 val
= lduw_be_p(ptr
);
4185 uint32_t lduw_phys(target_phys_addr_t addr
)
4187 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4190 uint32_t lduw_le_phys(target_phys_addr_t addr
)
4192 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4195 uint32_t lduw_be_phys(target_phys_addr_t addr
)
4197 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4200 /* warning: addr must be aligned. The ram page is not masked as dirty
4201 and the code inside is not invalidated. It is useful if the dirty
4202 bits are used to track modified PTEs */
4203 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4210 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4213 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4214 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4215 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4216 io_mem_write(io_index
, addr
, val
, 4);
4218 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4219 ptr
= qemu_get_ram_ptr(addr1
);
4222 if (unlikely(in_migration
)) {
4223 if (!cpu_physical_memory_is_dirty(addr1
)) {
4224 /* invalidate code */
4225 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4227 cpu_physical_memory_set_dirty_flags(
4228 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4234 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4241 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4244 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4245 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4246 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4247 #ifdef TARGET_WORDS_BIGENDIAN
4248 io_mem_write(io_index
, addr
, val
>> 32, 4);
4249 io_mem_write(io_index
, addr
+ 4, (uint32_t)val
, 4);
4251 io_mem_write(io_index
, addr
, (uint32_t)val
, 4);
4252 io_mem_write(io_index
, addr
+ 4, val
>> 32, 4);
4255 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4256 (addr
& ~TARGET_PAGE_MASK
);
4261 /* warning: addr must be aligned */
4262 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4263 enum device_endian endian
)
4270 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4273 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4274 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4275 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4276 #if defined(TARGET_WORDS_BIGENDIAN)
4277 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4281 if (endian
== DEVICE_BIG_ENDIAN
) {
4285 io_mem_write(io_index
, addr
, val
, 4);
4287 unsigned long addr1
;
4288 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4290 ptr
= qemu_get_ram_ptr(addr1
);
4292 case DEVICE_LITTLE_ENDIAN
:
4295 case DEVICE_BIG_ENDIAN
:
4302 if (!cpu_physical_memory_is_dirty(addr1
)) {
4303 /* invalidate code */
4304 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4306 cpu_physical_memory_set_dirty_flags(addr1
,
4307 (0xff & ~CODE_DIRTY_FLAG
));
4312 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4314 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4317 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4319 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4322 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4324 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4328 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4331 cpu_physical_memory_write(addr
, &v
, 1);
4334 /* warning: addr must be aligned */
4335 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4336 enum device_endian endian
)
4343 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4346 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4347 io_index
= pd
& (IO_MEM_NB_ENTRIES
- 1);
4348 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4349 #if defined(TARGET_WORDS_BIGENDIAN)
4350 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4354 if (endian
== DEVICE_BIG_ENDIAN
) {
4358 io_mem_write(io_index
, addr
, val
, 2);
4360 unsigned long addr1
;
4361 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4363 ptr
= qemu_get_ram_ptr(addr1
);
4365 case DEVICE_LITTLE_ENDIAN
:
4368 case DEVICE_BIG_ENDIAN
:
4375 if (!cpu_physical_memory_is_dirty(addr1
)) {
4376 /* invalidate code */
4377 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4379 cpu_physical_memory_set_dirty_flags(addr1
,
4380 (0xff & ~CODE_DIRTY_FLAG
));
4385 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4387 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4390 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4392 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4395 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4397 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4401 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4404 cpu_physical_memory_write(addr
, &val
, 8);
4407 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
4409 val
= cpu_to_le64(val
);
4410 cpu_physical_memory_write(addr
, &val
, 8);
4413 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
4415 val
= cpu_to_be64(val
);
4416 cpu_physical_memory_write(addr
, &val
, 8);
4419 /* virtual memory access for debug (includes writing to ROM) */
4420 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4421 uint8_t *buf
, int len
, int is_write
)
4424 target_phys_addr_t phys_addr
;
4428 page
= addr
& TARGET_PAGE_MASK
;
4429 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4430 /* if no physical page mapped, return an error */
4431 if (phys_addr
== -1)
4433 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4436 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4438 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4440 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
    TranslationBlock *tb;
    target_ulong pc, cs_base;

    tb = tb_find_pc((unsigned long)retaddr);
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    cs_base = tb->cs_base;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
4508 #if !defined(CONFIG_USER_ONLY)
4510 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4512 int i
, target_code_size
, max_target_code_size
;
4513 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4514 TranslationBlock
*tb
;
4516 target_code_size
= 0;
4517 max_target_code_size
= 0;
4519 direct_jmp_count
= 0;
4520 direct_jmp2_count
= 0;
4521 for(i
= 0; i
< nb_tbs
; i
++) {
4523 target_code_size
+= tb
->size
;
4524 if (tb
->size
> max_target_code_size
)
4525 max_target_code_size
= tb
->size
;
4526 if (tb
->page_addr
[1] != -1)
4528 if (tb
->tb_next_offset
[0] != 0xffff) {
4530 if (tb
->tb_next_offset
[1] != 0xffff) {
4531 direct_jmp2_count
++;
4535 /* XXX: avoid using doubles ? */
4536 cpu_fprintf(f
, "Translation buffer state:\n");
4537 cpu_fprintf(f
, "gen code size %td/%ld\n",
4538 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4539 cpu_fprintf(f
, "TB count %d/%d\n",
4540 nb_tbs
, code_gen_max_blocks
);
4541 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4542 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4543 max_target_code_size
);
4544 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4545 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4546 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4547 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4549 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4550 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4552 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4554 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4555 cpu_fprintf(f
, "\nStatistics:\n");
4556 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4557 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4558 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4559 tcg_dump_info(f
, cpu_fprintf
);
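
/*
 * Illustrative sketch, not part of the original file: what the "expansion
 * ratio" printed by dump_exec_info() means.  With made-up numbers, if the
 * translation cache holds code_gen_ptr - code_gen_buffer = 1 MiB of
 * generated host code produced from target_code_size = 256 KiB of guest
 * code, the line reads "expansion ratio: 4.0", i.e. each guest byte cost
 * about four host bytes of generated code.
 */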
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
    int mmu_idx, page_index, pd;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                  TARGET_FMT_lx "\n", addr);
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
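
/*
 * Illustrative sketch, not part of the original file: the TLB slot selection
 * used by get_page_addr_code() above, with made-up numbers.  Assuming
 * TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256:
 *
 *   addr       = 0x00403123
 *   page_index = (0x00403123 >> 12) & (256 - 1) = 0x403 & 0xff = 0x03
 *
 * The entry only matches when its addr_code equals addr & TARGET_PAGE_MASK
 * (0x00403000 here); otherwise the slow path refills the TLB first.
 */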
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
#if defined(TARGET_WORDS_BIGENDIAN)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"
#include "softmmu_template.h"