2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
40 #if defined(CONFIG_USER_ONLY)
44 //#define DEBUG_TB_INVALIDATE
47 //#define DEBUG_UNASSIGNED
49 /* make various TB consistency checks */
50 //#define DEBUG_TB_CHECK
51 //#define DEBUG_TLB_CHECK
53 //#define DEBUG_IOPORT
54 //#define DEBUG_SUBPAGE
56 #if !defined(CONFIG_USER_ONLY)
57 /* TB consistency checks only implemented for usermode emulation. */
61 #define SMC_BITMAP_USE_THRESHOLD 10
63 #define MMAP_AREA_START 0x00000000
64 #define MMAP_AREA_END 0xa8000000
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 TranslationBlock
*tbs
;
85 int code_gen_max_blocks
;
86 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
91 uint8_t code_gen_prologue
[1024] __attribute__((aligned (32)));
92 uint8_t *code_gen_buffer
;
93 unsigned long code_gen_buffer_size
;
94 /* threshold to flush the translated code buffer */
95 unsigned long code_gen_buffer_max_size
;
96 uint8_t *code_gen_ptr
;
98 #if !defined(CONFIG_USER_ONLY)
99 ram_addr_t phys_ram_size
;
101 uint8_t *phys_ram_base
;
102 uint8_t *phys_ram_dirty
;
103 static ram_addr_t phys_ram_alloc_offset
= 0;
107 /* current CPU in the current thread. It is only valid inside
109 CPUState
*cpu_single_env
;
111 typedef struct PageDesc
{
112 /* list of TBs intersecting this ram page */
113 TranslationBlock
*first_tb
;
114 /* in order to optimize self modifying code, we count the number
115 of lookups we do to a given page to use a bitmap */
116 unsigned int code_write_count
;
117 uint8_t *code_bitmap
;
118 #if defined(CONFIG_USER_ONLY)
123 typedef struct PhysPageDesc
{
124 /* offset in host memory of the page + io_index in the low bits */
125 ram_addr_t phys_offset
;
129 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
130 /* XXX: this is a temporary hack for alpha target.
131 * In the future, this is to be replaced by a multi-level table
132 * to actually be able to handle the complete 64 bits address space.
134 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
136 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
139 #define L1_SIZE (1 << L1_BITS)
140 #define L2_SIZE (1 << L2_BITS)
142 unsigned long qemu_real_host_page_size
;
143 unsigned long qemu_host_page_bits
;
144 unsigned long qemu_host_page_size
;
145 unsigned long qemu_host_page_mask
;
147 /* XXX: for system emulation, it could just be an array */
148 static PageDesc
*l1_map
[L1_SIZE
];
149 PhysPageDesc
**l1_phys_map
;
151 #if !defined(CONFIG_USER_ONLY)
152 static void io_mem_init(void);
154 /* io memory support */
155 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
156 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
157 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
158 static int io_mem_nb
;
159 static int io_mem_watch
;
163 char *logfilename
= "/tmp/qemu.log";
166 static int log_append
= 0;
169 static int tlb_flush_count
;
170 static int tb_flush_count
;
171 static int tb_phys_invalidate_count
;
173 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
174 typedef struct subpage_t
{
175 target_phys_addr_t base
;
176 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
177 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
178 void *opaque
[TARGET_PAGE_SIZE
][2][4];
182 static void map_exec(void *addr
, long size
)
185 VirtualProtect(addr
, size
,
186 PAGE_EXECUTE_READWRITE
, &old_protect
);
190 static void map_exec(void *addr
, long size
)
192 unsigned long start
, end
, page_size
;
194 page_size
= getpagesize();
195 start
= (unsigned long)addr
;
196 start
&= ~(page_size
- 1);
198 end
= (unsigned long)addr
+ size
;
199 end
+= page_size
- 1;
200 end
&= ~(page_size
- 1);
202 mprotect((void *)start
, end
- start
,
203 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
207 static void page_init(void)
209 /* NOTE: we can always suppose that qemu_host_page_size >=
213 SYSTEM_INFO system_info
;
216 GetSystemInfo(&system_info
);
217 qemu_real_host_page_size
= system_info
.dwPageSize
;
220 qemu_real_host_page_size
= getpagesize();
222 if (qemu_host_page_size
== 0)
223 qemu_host_page_size
= qemu_real_host_page_size
;
224 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
225 qemu_host_page_size
= TARGET_PAGE_SIZE
;
226 qemu_host_page_bits
= 0;
227 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
228 qemu_host_page_bits
++;
229 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
230 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
231 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
233 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
235 long long startaddr
, endaddr
;
240 last_brk
= (unsigned long)sbrk(0);
241 f
= fopen("/proc/self/maps", "r");
244 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
246 startaddr
= MIN(startaddr
,
247 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
248 endaddr
= MIN(endaddr
,
249 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
250 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
251 TARGET_PAGE_ALIGN(endaddr
),
262 static inline PageDesc
*page_find_alloc(target_ulong index
)
266 #if TARGET_LONG_BITS > 32
267 /* Host memory outside guest VM. For 32-bit targets we have already
268 excluded high addresses. */
269 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
* TARGET_PAGE_SIZE
))
272 lp
= &l1_map
[index
>> L2_BITS
];
275 /* allocate if not found */
276 #if defined(CONFIG_USER_ONLY)
278 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
279 /* Don't use qemu_malloc because it may recurse. */
280 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
281 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
284 if (addr
== (target_ulong
)addr
) {
285 page_set_flags(addr
& TARGET_PAGE_MASK
,
286 TARGET_PAGE_ALIGN(addr
+ len
),
290 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
294 return p
+ (index
& (L2_SIZE
- 1));
297 static inline PageDesc
*page_find(target_ulong index
)
301 p
= l1_map
[index
>> L2_BITS
];
304 return p
+ (index
& (L2_SIZE
- 1));
307 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
312 p
= (void **)l1_phys_map
;
313 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
315 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
316 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
318 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
321 /* allocate if not found */
324 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
325 memset(p
, 0, sizeof(void *) * L1_SIZE
);
329 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
333 /* allocate if not found */
336 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
338 for (i
= 0; i
< L2_SIZE
; i
++)
339 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
341 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
344 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
346 return phys_page_find_alloc(index
, 0);
349 #if !defined(CONFIG_USER_ONLY)
350 static void tlb_protect_code(ram_addr_t ram_addr
);
351 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
353 #define mmap_lock() do { } while(0)
354 #define mmap_unlock() do { } while(0)
357 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
359 #if defined(CONFIG_USER_ONLY)
360 /* Currently it is not recommanded to allocate big chunks of data in
361 user mode. It will change when a dedicated libc will be used */
362 #define USE_STATIC_CODE_GEN_BUFFER
365 #ifdef USE_STATIC_CODE_GEN_BUFFER
366 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
369 void code_gen_alloc(unsigned long tb_size
)
371 #ifdef USE_STATIC_CODE_GEN_BUFFER
372 code_gen_buffer
= static_code_gen_buffer
;
373 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
374 map_exec(code_gen_buffer
, code_gen_buffer_size
);
376 code_gen_buffer_size
= tb_size
;
377 if (code_gen_buffer_size
== 0) {
378 #if defined(CONFIG_USER_ONLY)
379 /* in user mode, phys_ram_size is not meaningful */
380 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
382 /* XXX: needs ajustments */
383 code_gen_buffer_size
= (int)(phys_ram_size
/ 4);
386 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
387 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
388 /* The code gen buffer location may have constraints depending on
389 the host cpu and OS */
390 #if defined(__linux__)
393 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
394 #if defined(__x86_64__)
396 /* Cannot map more than that */
397 if (code_gen_buffer_size
> (800 * 1024 * 1024))
398 code_gen_buffer_size
= (800 * 1024 * 1024);
400 code_gen_buffer
= mmap(NULL
, code_gen_buffer_size
,
401 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
403 if (code_gen_buffer
== MAP_FAILED
) {
404 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
409 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
410 if (!code_gen_buffer
) {
411 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
414 map_exec(code_gen_buffer
, code_gen_buffer_size
);
416 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
417 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
418 code_gen_buffer_max_size
= code_gen_buffer_size
-
419 code_gen_max_block_size();
420 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
421 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
424 /* Must be called before using the QEMU cpus. 'tb_size' is the size
425 (in bytes) allocated to the translation buffer. Zero means default
427 void cpu_exec_init_all(unsigned long tb_size
)
430 code_gen_alloc(tb_size
);
431 code_gen_ptr
= code_gen_buffer
;
433 #if !defined(CONFIG_USER_ONLY)
438 void cpu_exec_init(CPUState
*env
)
443 env
->next_cpu
= NULL
;
446 while (*penv
!= NULL
) {
447 penv
= (CPUState
**)&(*penv
)->next_cpu
;
450 env
->cpu_index
= cpu_index
;
451 env
->nb_watchpoints
= 0;
455 static inline void invalidate_page_bitmap(PageDesc
*p
)
457 if (p
->code_bitmap
) {
458 qemu_free(p
->code_bitmap
);
459 p
->code_bitmap
= NULL
;
461 p
->code_write_count
= 0;
464 /* set to NULL all the 'first_tb' fields in all PageDescs */
465 static void page_flush_tb(void)
470 for(i
= 0; i
< L1_SIZE
; i
++) {
473 for(j
= 0; j
< L2_SIZE
; j
++) {
475 invalidate_page_bitmap(p
);
482 /* flush all the translation blocks */
483 /* XXX: tb_flush is currently not thread safe */
484 void tb_flush(CPUState
*env1
)
487 #if defined(DEBUG_FLUSH)
488 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
489 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
491 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
493 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
494 cpu_abort(env1
, "Internal error: code buffer overflow\n");
498 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
499 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
502 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
505 code_gen_ptr
= code_gen_buffer
;
506 /* XXX: flush processor icache at this point if cache flush is
511 #ifdef DEBUG_TB_CHECK
513 static void tb_invalidate_check(target_ulong address
)
515 TranslationBlock
*tb
;
517 address
&= TARGET_PAGE_MASK
;
518 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
519 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
520 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
521 address
>= tb
->pc
+ tb
->size
)) {
522 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
523 address
, (long)tb
->pc
, tb
->size
);
529 /* verify that all the pages have correct rights for code */
530 static void tb_page_check(void)
532 TranslationBlock
*tb
;
533 int i
, flags1
, flags2
;
535 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
536 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
537 flags1
= page_get_flags(tb
->pc
);
538 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
539 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
540 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
541 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
547 void tb_jmp_check(TranslationBlock
*tb
)
549 TranslationBlock
*tb1
;
552 /* suppress any remaining jumps to this TB */
556 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
559 tb1
= tb1
->jmp_next
[n1
];
561 /* check end of list */
563 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
569 /* invalidate one TB */
570 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
573 TranslationBlock
*tb1
;
577 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
580 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
584 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
586 TranslationBlock
*tb1
;
592 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
594 *ptb
= tb1
->page_next
[n1
];
597 ptb
= &tb1
->page_next
[n1
];
601 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
603 TranslationBlock
*tb1
, **ptb
;
606 ptb
= &tb
->jmp_next
[n
];
609 /* find tb(n) in circular list */
613 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
614 if (n1
== n
&& tb1
== tb
)
617 ptb
= &tb1
->jmp_first
;
619 ptb
= &tb1
->jmp_next
[n1
];
622 /* now we can suppress tb(n) from the list */
623 *ptb
= tb
->jmp_next
[n
];
625 tb
->jmp_next
[n
] = NULL
;
629 /* reset the jump entry 'n' of a TB so that it is not chained to
631 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
633 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
636 static inline void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
641 target_phys_addr_t phys_pc
;
642 TranslationBlock
*tb1
, *tb2
;
644 /* remove the TB from the hash list */
645 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
646 h
= tb_phys_hash_func(phys_pc
);
647 tb_remove(&tb_phys_hash
[h
], tb
,
648 offsetof(TranslationBlock
, phys_hash_next
));
650 /* remove the TB from the page list */
651 if (tb
->page_addr
[0] != page_addr
) {
652 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
653 tb_page_remove(&p
->first_tb
, tb
);
654 invalidate_page_bitmap(p
);
656 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
657 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
658 tb_page_remove(&p
->first_tb
, tb
);
659 invalidate_page_bitmap(p
);
662 tb_invalidated_flag
= 1;
664 /* remove the TB from the hash list */
665 h
= tb_jmp_cache_hash_func(tb
->pc
);
666 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
667 if (env
->tb_jmp_cache
[h
] == tb
)
668 env
->tb_jmp_cache
[h
] = NULL
;
671 /* suppress this TB from the two jump lists */
672 tb_jmp_remove(tb
, 0);
673 tb_jmp_remove(tb
, 1);
675 /* suppress any remaining jumps to this TB */
681 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
682 tb2
= tb1
->jmp_next
[n1
];
683 tb_reset_jump(tb1
, n1
);
684 tb1
->jmp_next
[n1
] = NULL
;
687 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
689 tb_phys_invalidate_count
++;
692 static inline void set_bits(uint8_t *tab
, int start
, int len
)
698 mask
= 0xff << (start
& 7);
699 if ((start
& ~7) == (end
& ~7)) {
701 mask
&= ~(0xff << (end
& 7));
706 start
= (start
+ 8) & ~7;
708 while (start
< end1
) {
713 mask
= ~(0xff << (end
& 7));
719 static void build_page_bitmap(PageDesc
*p
)
721 int n
, tb_start
, tb_end
;
722 TranslationBlock
*tb
;
724 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
727 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
732 tb
= (TranslationBlock
*)((long)tb
& ~3);
733 /* NOTE: this is subtle as a TB may span two physical pages */
735 /* NOTE: tb_end may be after the end of the page, but
736 it is not a problem */
737 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
738 tb_end
= tb_start
+ tb
->size
;
739 if (tb_end
> TARGET_PAGE_SIZE
)
740 tb_end
= TARGET_PAGE_SIZE
;
743 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
745 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
746 tb
= tb
->page_next
[n
];
750 #ifdef TARGET_HAS_PRECISE_SMC
752 static void tb_gen_code(CPUState
*env
,
753 target_ulong pc
, target_ulong cs_base
, int flags
,
756 TranslationBlock
*tb
;
758 target_ulong phys_pc
, phys_page2
, virt_page2
;
761 phys_pc
= get_phys_addr_code(env
, pc
);
764 /* flush must be done */
766 /* cannot fail at this point */
769 tc_ptr
= code_gen_ptr
;
771 tb
->cs_base
= cs_base
;
774 cpu_gen_code(env
, tb
, &code_gen_size
);
775 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
777 /* check next page if needed */
778 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
780 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
781 phys_page2
= get_phys_addr_code(env
, virt_page2
);
783 tb_link_phys(tb
, phys_pc
, phys_page2
);
787 /* invalidate all TBs which intersect with the target physical page
788 starting in range [start;end[. NOTE: start and end must refer to
789 the same physical page. 'is_cpu_write_access' should be true if called
790 from a real cpu write access: the virtual CPU will exit the current
791 TB if code is modified inside this TB. */
792 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
793 int is_cpu_write_access
)
795 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
796 CPUState
*env
= cpu_single_env
;
798 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
799 target_ulong tb_start
, tb_end
;
800 target_ulong current_pc
, current_cs_base
;
802 p
= page_find(start
>> TARGET_PAGE_BITS
);
805 if (!p
->code_bitmap
&&
806 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
807 is_cpu_write_access
) {
808 /* build code bitmap */
809 build_page_bitmap(p
);
812 /* we remove all the TBs in the range [start, end[ */
813 /* XXX: see if in some cases it could be faster to invalidate all the code */
814 current_tb_not_found
= is_cpu_write_access
;
815 current_tb_modified
= 0;
816 current_tb
= NULL
; /* avoid warning */
817 current_pc
= 0; /* avoid warning */
818 current_cs_base
= 0; /* avoid warning */
819 current_flags
= 0; /* avoid warning */
823 tb
= (TranslationBlock
*)((long)tb
& ~3);
824 tb_next
= tb
->page_next
[n
];
825 /* NOTE: this is subtle as a TB may span two physical pages */
827 /* NOTE: tb_end may be after the end of the page, but
828 it is not a problem */
829 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
830 tb_end
= tb_start
+ tb
->size
;
832 tb_start
= tb
->page_addr
[1];
833 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
835 if (!(tb_end
<= start
|| tb_start
>= end
)) {
836 #ifdef TARGET_HAS_PRECISE_SMC
837 if (current_tb_not_found
) {
838 current_tb_not_found
= 0;
840 if (env
->mem_write_pc
) {
841 /* now we have a real cpu fault */
842 current_tb
= tb_find_pc(env
->mem_write_pc
);
845 if (current_tb
== tb
&&
846 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
847 /* If we are modifying the current TB, we must stop
848 its execution. We could be more precise by checking
849 that the modification is after the current PC, but it
850 would require a specialized function to partially
851 restore the CPU state */
853 current_tb_modified
= 1;
854 cpu_restore_state(current_tb
, env
,
855 env
->mem_write_pc
, NULL
);
856 #if defined(TARGET_I386)
857 current_flags
= env
->hflags
;
858 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
859 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
860 current_pc
= current_cs_base
+ env
->eip
;
862 #error unsupported CPU
865 #endif /* TARGET_HAS_PRECISE_SMC */
866 /* we need to do that to handle the case where a signal
867 occurs while doing tb_phys_invalidate() */
870 saved_tb
= env
->current_tb
;
871 env
->current_tb
= NULL
;
873 tb_phys_invalidate(tb
, -1);
875 env
->current_tb
= saved_tb
;
876 if (env
->interrupt_request
&& env
->current_tb
)
877 cpu_interrupt(env
, env
->interrupt_request
);
882 #if !defined(CONFIG_USER_ONLY)
883 /* if no code remaining, no need to continue to use slow writes */
885 invalidate_page_bitmap(p
);
886 if (is_cpu_write_access
) {
887 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
891 #ifdef TARGET_HAS_PRECISE_SMC
892 if (current_tb_modified
) {
893 /* we generate a block containing just the instruction
894 modifying the memory. It will ensure that it cannot modify
896 env
->current_tb
= NULL
;
897 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
899 cpu_resume_from_signal(env
, NULL
);
904 /* len must be <= 8 and start must be a multiple of len */
905 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
912 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
913 cpu_single_env
->mem_write_vaddr
, len
,
915 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
919 p
= page_find(start
>> TARGET_PAGE_BITS
);
922 if (p
->code_bitmap
) {
923 offset
= start
& ~TARGET_PAGE_MASK
;
924 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
925 if (b
& ((1 << len
) - 1))
929 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
933 #if !defined(CONFIG_SOFTMMU)
934 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
935 unsigned long pc
, void *puc
)
937 int n
, current_flags
, current_tb_modified
;
938 target_ulong current_pc
, current_cs_base
;
940 TranslationBlock
*tb
, *current_tb
;
941 #ifdef TARGET_HAS_PRECISE_SMC
942 CPUState
*env
= cpu_single_env
;
945 addr
&= TARGET_PAGE_MASK
;
946 p
= page_find(addr
>> TARGET_PAGE_BITS
);
950 current_tb_modified
= 0;
952 current_pc
= 0; /* avoid warning */
953 current_cs_base
= 0; /* avoid warning */
954 current_flags
= 0; /* avoid warning */
955 #ifdef TARGET_HAS_PRECISE_SMC
957 current_tb
= tb_find_pc(pc
);
962 tb
= (TranslationBlock
*)((long)tb
& ~3);
963 #ifdef TARGET_HAS_PRECISE_SMC
964 if (current_tb
== tb
&&
965 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
966 /* If we are modifying the current TB, we must stop
967 its execution. We could be more precise by checking
968 that the modification is after the current PC, but it
969 would require a specialized function to partially
970 restore the CPU state */
972 current_tb_modified
= 1;
973 cpu_restore_state(current_tb
, env
, pc
, puc
);
974 #if defined(TARGET_I386)
975 current_flags
= env
->hflags
;
976 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
977 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
978 current_pc
= current_cs_base
+ env
->eip
;
980 #error unsupported CPU
983 #endif /* TARGET_HAS_PRECISE_SMC */
984 tb_phys_invalidate(tb
, addr
);
985 tb
= tb
->page_next
[n
];
988 #ifdef TARGET_HAS_PRECISE_SMC
989 if (current_tb_modified
) {
990 /* we generate a block containing just the instruction
991 modifying the memory. It will ensure that it cannot modify
993 env
->current_tb
= NULL
;
994 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
996 cpu_resume_from_signal(env
, puc
);
1002 /* add the tb in the target page and protect it if necessary */
1003 static inline void tb_alloc_page(TranslationBlock
*tb
,
1004 unsigned int n
, target_ulong page_addr
)
1007 TranslationBlock
*last_first_tb
;
1009 tb
->page_addr
[n
] = page_addr
;
1010 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1011 tb
->page_next
[n
] = p
->first_tb
;
1012 last_first_tb
= p
->first_tb
;
1013 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1014 invalidate_page_bitmap(p
);
1016 #if defined(TARGET_HAS_SMC) || 1
1018 #if defined(CONFIG_USER_ONLY)
1019 if (p
->flags
& PAGE_WRITE
) {
1024 /* force the host page as non writable (writes will have a
1025 page fault + mprotect overhead) */
1026 page_addr
&= qemu_host_page_mask
;
1028 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1029 addr
+= TARGET_PAGE_SIZE
) {
1031 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1035 p2
->flags
&= ~PAGE_WRITE
;
1036 page_get_flags(addr
);
1038 mprotect(g2h(page_addr
), qemu_host_page_size
,
1039 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1040 #ifdef DEBUG_TB_INVALIDATE
1041 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1046 /* if some code is already present, then the pages are already
1047 protected. So we handle the case where only the first TB is
1048 allocated in a physical page */
1049 if (!last_first_tb
) {
1050 tlb_protect_code(page_addr
);
1054 #endif /* TARGET_HAS_SMC */
1057 /* Allocate a new translation block. Flush the translation buffer if
1058 too many translation blocks or too much generated code. */
1059 TranslationBlock
*tb_alloc(target_ulong pc
)
1061 TranslationBlock
*tb
;
1063 if (nb_tbs
>= code_gen_max_blocks
||
1064 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1066 tb
= &tbs
[nb_tbs
++];
1072 /* add a new TB and link it to the physical page tables. phys_page2 is
1073 (-1) to indicate that only one page contains the TB. */
1074 void tb_link_phys(TranslationBlock
*tb
,
1075 target_ulong phys_pc
, target_ulong phys_page2
)
1078 TranslationBlock
**ptb
;
1080 /* Grab the mmap lock to stop another thread invalidating this TB
1081 before we are done. */
1083 /* add in the physical hash table */
1084 h
= tb_phys_hash_func(phys_pc
);
1085 ptb
= &tb_phys_hash
[h
];
1086 tb
->phys_hash_next
= *ptb
;
1089 /* add in the page list */
1090 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1091 if (phys_page2
!= -1)
1092 tb_alloc_page(tb
, 1, phys_page2
);
1094 tb
->page_addr
[1] = -1;
1096 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1097 tb
->jmp_next
[0] = NULL
;
1098 tb
->jmp_next
[1] = NULL
;
1100 /* init original jump addresses */
1101 if (tb
->tb_next_offset
[0] != 0xffff)
1102 tb_reset_jump(tb
, 0);
1103 if (tb
->tb_next_offset
[1] != 0xffff)
1104 tb_reset_jump(tb
, 1);
1106 #ifdef DEBUG_TB_CHECK
1112 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1113 tb[1].tc_ptr. Return NULL if not found */
1114 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1116 int m_min
, m_max
, m
;
1118 TranslationBlock
*tb
;
1122 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1123 tc_ptr
>= (unsigned long)code_gen_ptr
)
1125 /* binary search (cf Knuth) */
1128 while (m_min
<= m_max
) {
1129 m
= (m_min
+ m_max
) >> 1;
1131 v
= (unsigned long)tb
->tc_ptr
;
1134 else if (tc_ptr
< v
) {
1143 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1145 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1147 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1150 tb1
= tb
->jmp_next
[n
];
1152 /* find head of list */
1155 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1158 tb1
= tb1
->jmp_next
[n1
];
1160 /* we are now sure now that tb jumps to tb1 */
1163 /* remove tb from the jmp_first list */
1164 ptb
= &tb_next
->jmp_first
;
1168 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1169 if (n1
== n
&& tb1
== tb
)
1171 ptb
= &tb1
->jmp_next
[n1
];
1173 *ptb
= tb
->jmp_next
[n
];
1174 tb
->jmp_next
[n
] = NULL
;
1176 /* suppress the jump to next tb in generated code */
1177 tb_reset_jump(tb
, n
);
1179 /* suppress jumps in the tb on which we could have jumped */
1180 tb_reset_jump_recursive(tb_next
);
1184 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1186 tb_reset_jump_recursive2(tb
, 0);
1187 tb_reset_jump_recursive2(tb
, 1);
1190 #if defined(TARGET_HAS_ICE)
1191 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1193 target_phys_addr_t addr
;
1195 ram_addr_t ram_addr
;
1198 addr
= cpu_get_phys_page_debug(env
, pc
);
1199 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1201 pd
= IO_MEM_UNASSIGNED
;
1203 pd
= p
->phys_offset
;
1205 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1206 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1210 /* Add a watchpoint. */
1211 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, int type
)
1215 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1216 if (addr
== env
->watchpoint
[i
].vaddr
)
1219 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1222 i
= env
->nb_watchpoints
++;
1223 env
->watchpoint
[i
].vaddr
= addr
;
1224 env
->watchpoint
[i
].type
= type
;
1225 tlb_flush_page(env
, addr
);
1226 /* FIXME: This flush is needed because of the hack to make memory ops
1227 terminate the TB. It can be removed once the proper IO trap and
1228 re-execute bits are in. */
1233 /* Remove a watchpoint. */
1234 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1238 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1239 if (addr
== env
->watchpoint
[i
].vaddr
) {
1240 env
->nb_watchpoints
--;
1241 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1242 tlb_flush_page(env
, addr
);
1249 /* Remove all watchpoints. */
1250 void cpu_watchpoint_remove_all(CPUState
*env
) {
1253 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1254 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1256 env
->nb_watchpoints
= 0;
1259 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1260 breakpoint is reached */
1261 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1263 #if defined(TARGET_HAS_ICE)
1266 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1267 if (env
->breakpoints
[i
] == pc
)
1271 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1273 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1275 breakpoint_invalidate(env
, pc
);
1282 /* remove all breakpoints */
1283 void cpu_breakpoint_remove_all(CPUState
*env
) {
1284 #if defined(TARGET_HAS_ICE)
1286 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1287 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1289 env
->nb_breakpoints
= 0;
1293 /* remove a breakpoint */
1294 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1296 #if defined(TARGET_HAS_ICE)
1298 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1299 if (env
->breakpoints
[i
] == pc
)
1304 env
->nb_breakpoints
--;
1305 if (i
< env
->nb_breakpoints
)
1306 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1308 breakpoint_invalidate(env
, pc
);
1315 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1316 CPU loop after each instruction */
1317 void cpu_single_step(CPUState
*env
, int enabled
)
1319 #if defined(TARGET_HAS_ICE)
1320 if (env
->singlestep_enabled
!= enabled
) {
1321 env
->singlestep_enabled
= enabled
;
1322 /* must flush all the translated code to avoid inconsistancies */
1323 /* XXX: only flush what is necessary */
1329 /* enable or disable low levels log */
1330 void cpu_set_log(int log_flags
)
1332 loglevel
= log_flags
;
1333 if (loglevel
&& !logfile
) {
1334 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1336 perror(logfilename
);
1339 #if !defined(CONFIG_SOFTMMU)
1340 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1342 static uint8_t logfile_buf
[4096];
1343 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1346 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1350 if (!loglevel
&& logfile
) {
1356 void cpu_set_log_filename(const char *filename
)
1358 logfilename
= strdup(filename
);
1363 cpu_set_log(loglevel
);
1366 /* mask must never be zero, except for A20 change call */
1367 void cpu_interrupt(CPUState
*env
, int mask
)
1369 #if !defined(USE_NPTL)
1370 TranslationBlock
*tb
;
1371 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1374 /* FIXME: This is probably not threadsafe. A different thread could
1375 be in the mittle of a read-modify-write operation. */
1376 env
->interrupt_request
|= mask
;
1377 #if defined(USE_NPTL)
1378 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1379 problem and hope the cpu will stop of its own accord. For userspace
1380 emulation this often isn't actually as bad as it sounds. Often
1381 signals are used primarily to interrupt blocking syscalls. */
1383 /* if the cpu is currently executing code, we must unlink it and
1384 all the potentially executing TB */
1385 tb
= env
->current_tb
;
1386 if (tb
&& !testandset(&interrupt_lock
)) {
1387 env
->current_tb
= NULL
;
1388 tb_reset_jump_recursive(tb
);
1389 resetlock(&interrupt_lock
);
1394 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1396 env
->interrupt_request
&= ~mask
;
1399 CPULogItem cpu_log_items
[] = {
1400 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1401 "show generated host assembly code for each compiled TB" },
1402 { CPU_LOG_TB_IN_ASM
, "in_asm",
1403 "show target assembly code for each compiled TB" },
1404 { CPU_LOG_TB_OP
, "op",
1405 "show micro ops for each compiled TB" },
1406 { CPU_LOG_TB_OP_OPT
, "op_opt",
1409 "before eflags optimization and "
1411 "after liveness analysis" },
1412 { CPU_LOG_INT
, "int",
1413 "show interrupts/exceptions in short format" },
1414 { CPU_LOG_EXEC
, "exec",
1415 "show trace before each executed TB (lots of logs)" },
1416 { CPU_LOG_TB_CPU
, "cpu",
1417 "show CPU state before block translation" },
1419 { CPU_LOG_PCALL
, "pcall",
1420 "show protected mode far calls/returns/exceptions" },
1423 { CPU_LOG_IOPORT
, "ioport",
1424 "show all i/o ports accesses" },
1429 static int cmp1(const char *s1
, int n
, const char *s2
)
1431 if (strlen(s2
) != n
)
1433 return memcmp(s1
, s2
, n
) == 0;
1436 /* takes a comma separated list of log masks. Return 0 if error. */
1437 int cpu_str_to_log_mask(const char *str
)
1446 p1
= strchr(p
, ',');
1449 if(cmp1(p
,p1
-p
,"all")) {
1450 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1454 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1455 if (cmp1(p
, p1
- p
, item
->name
))
1469 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1476 fprintf(stderr
, "qemu: fatal: ");
1477 vfprintf(stderr
, fmt
, ap
);
1478 fprintf(stderr
, "\n");
1480 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1482 cpu_dump_state(env
, stderr
, fprintf
, 0);
1485 fprintf(logfile
, "qemu: fatal: ");
1486 vfprintf(logfile
, fmt
, ap2
);
1487 fprintf(logfile
, "\n");
1489 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1491 cpu_dump_state(env
, logfile
, fprintf
, 0);
1501 CPUState
*cpu_copy(CPUState
*env
)
1503 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1504 /* preserve chaining and index */
1505 CPUState
*next_cpu
= new_env
->next_cpu
;
1506 int cpu_index
= new_env
->cpu_index
;
1507 memcpy(new_env
, env
, sizeof(CPUState
));
1508 new_env
->next_cpu
= next_cpu
;
1509 new_env
->cpu_index
= cpu_index
;
1513 #if !defined(CONFIG_USER_ONLY)
1515 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1519 /* Discard jump cache entries for any tb which might potentially
1520 overlap the flushed page. */
1521 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1522 memset (&env
->tb_jmp_cache
[i
], 0,
1523 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1525 i
= tb_jmp_cache_hash_page(addr
);
1526 memset (&env
->tb_jmp_cache
[i
], 0,
1527 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1530 /* NOTE: if flush_global is true, also flush global entries (not
1532 void tlb_flush(CPUState
*env
, int flush_global
)
1536 #if defined(DEBUG_TLB)
1537 printf("tlb_flush:\n");
1539 /* must reset current TB so that interrupts cannot modify the
1540 links while we are modifying them */
1541 env
->current_tb
= NULL
;
1543 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1544 env
->tlb_table
[0][i
].addr_read
= -1;
1545 env
->tlb_table
[0][i
].addr_write
= -1;
1546 env
->tlb_table
[0][i
].addr_code
= -1;
1547 env
->tlb_table
[1][i
].addr_read
= -1;
1548 env
->tlb_table
[1][i
].addr_write
= -1;
1549 env
->tlb_table
[1][i
].addr_code
= -1;
1550 #if (NB_MMU_MODES >= 3)
1551 env
->tlb_table
[2][i
].addr_read
= -1;
1552 env
->tlb_table
[2][i
].addr_write
= -1;
1553 env
->tlb_table
[2][i
].addr_code
= -1;
1554 #if (NB_MMU_MODES == 4)
1555 env
->tlb_table
[3][i
].addr_read
= -1;
1556 env
->tlb_table
[3][i
].addr_write
= -1;
1557 env
->tlb_table
[3][i
].addr_code
= -1;
1562 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1565 if (env
->kqemu_enabled
) {
1566 kqemu_flush(env
, flush_global
);
1572 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1574 if (addr
== (tlb_entry
->addr_read
&
1575 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1576 addr
== (tlb_entry
->addr_write
&
1577 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1578 addr
== (tlb_entry
->addr_code
&
1579 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1580 tlb_entry
->addr_read
= -1;
1581 tlb_entry
->addr_write
= -1;
1582 tlb_entry
->addr_code
= -1;
1586 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1590 #if defined(DEBUG_TLB)
1591 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1593 /* must reset current TB so that interrupts cannot modify the
1594 links while we are modifying them */
1595 env
->current_tb
= NULL
;
1597 addr
&= TARGET_PAGE_MASK
;
1598 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1599 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1600 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1601 #if (NB_MMU_MODES >= 3)
1602 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1603 #if (NB_MMU_MODES == 4)
1604 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1608 tlb_flush_jmp_cache(env
, addr
);
1611 if (env
->kqemu_enabled
) {
1612 kqemu_flush_page(env
, addr
);
1617 /* update the TLBs so that writes to code in the virtual page 'addr'
1619 static void tlb_protect_code(ram_addr_t ram_addr
)
1621 cpu_physical_memory_reset_dirty(ram_addr
,
1622 ram_addr
+ TARGET_PAGE_SIZE
,
1626 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1627 tested for self modifying code */
1628 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1631 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1634 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1635 unsigned long start
, unsigned long length
)
1638 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1639 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1640 if ((addr
- start
) < length
) {
1641 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1646 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1650 unsigned long length
, start1
;
1654 start
&= TARGET_PAGE_MASK
;
1655 end
= TARGET_PAGE_ALIGN(end
);
1657 length
= end
- start
;
1660 len
= length
>> TARGET_PAGE_BITS
;
1662 /* XXX: should not depend on cpu context */
1664 if (env
->kqemu_enabled
) {
1667 for(i
= 0; i
< len
; i
++) {
1668 kqemu_set_notdirty(env
, addr
);
1669 addr
+= TARGET_PAGE_SIZE
;
1673 mask
= ~dirty_flags
;
1674 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1675 for(i
= 0; i
< len
; i
++)
1678 /* we modify the TLB cache so that the dirty bit will be set again
1679 when accessing the range */
1680 start1
= start
+ (unsigned long)phys_ram_base
;
1681 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1682 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1683 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1684 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1685 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1686 #if (NB_MMU_MODES >= 3)
1687 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1688 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1689 #if (NB_MMU_MODES == 4)
1690 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1691 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1697 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1699 ram_addr_t ram_addr
;
1701 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1702 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1703 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1704 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1705 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1710 /* update the TLB according to the current state of the dirty bits */
1711 void cpu_tlb_update_dirty(CPUState
*env
)
1714 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1715 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1716 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1717 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1718 #if (NB_MMU_MODES >= 3)
1719 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1720 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1721 #if (NB_MMU_MODES == 4)
1722 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1723 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1728 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1730 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1731 tlb_entry
->addr_write
= vaddr
;
1734 /* update the TLB corresponding to virtual page vaddr
1735 so that it is no longer dirty */
1736 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1740 vaddr
&= TARGET_PAGE_MASK
;
1741 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1742 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1743 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1744 #if (NB_MMU_MODES >= 3)
1745 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1746 #if (NB_MMU_MODES == 4)
1747 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1752 /* add a new TLB entry. At most one entry for a given virtual address
1753 is permitted. Return 0 if OK or 2 if the page could not be mapped
1754 (can only happen in non SOFTMMU mode for I/O pages or pages
1755 conflicting with the host address space). */
1756 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1757 target_phys_addr_t paddr
, int prot
,
1758 int mmu_idx
, int is_softmmu
)
1763 target_ulong address
;
1764 target_ulong code_address
;
1765 target_phys_addr_t addend
;
1769 target_phys_addr_t iotlb
;
1771 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1773 pd
= IO_MEM_UNASSIGNED
;
1775 pd
= p
->phys_offset
;
1777 #if defined(DEBUG_TLB)
1778 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1779 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1784 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1785 /* IO memory case (romd handled later) */
1786 address
|= TLB_MMIO
;
1788 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1789 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1791 iotlb
= pd
& TARGET_PAGE_MASK
;
1792 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1793 iotlb
|= IO_MEM_NOTDIRTY
;
1795 iotlb
|= IO_MEM_ROM
;
1797 /* IO handlers are currently passed a phsical address.
1798 It would be nice to pass an offset from the base address
1799 of that region. This would avoid having to special case RAM,
1800 and avoid full address decoding in every device.
1801 We can't use the high bits of pd for this because
1802 IO_MEM_ROMD uses these as a ram address. */
1803 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
1806 code_address
= address
;
1807 /* Make accesses to pages with watchpoints go via the
1808 watchpoint trap routines. */
1809 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1810 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1811 iotlb
= io_mem_watch
+ paddr
;
1812 /* TODO: The memory case can be optimized by not trapping
1813 reads of pages with a write breakpoint. */
1814 address
|= TLB_MMIO
;
1818 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1819 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1820 te
= &env
->tlb_table
[mmu_idx
][index
];
1821 te
->addend
= addend
- vaddr
;
1822 if (prot
& PAGE_READ
) {
1823 te
->addr_read
= address
;
1828 if (prot
& PAGE_EXEC
) {
1829 te
->addr_code
= code_address
;
1833 if (prot
& PAGE_WRITE
) {
1834 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1835 (pd
& IO_MEM_ROMD
)) {
1836 /* Write access calls the I/O callback. */
1837 te
->addr_write
= address
| TLB_MMIO
;
1838 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1839 !cpu_physical_memory_is_dirty(pd
)) {
1840 te
->addr_write
= address
| TLB_NOTDIRTY
;
1842 te
->addr_write
= address
;
1845 te
->addr_write
= -1;
1852 void tlb_flush(CPUState
*env
, int flush_global
)
1856 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1860 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1861 target_phys_addr_t paddr
, int prot
,
1862 int mmu_idx
, int is_softmmu
)
1867 /* dump memory mappings */
1868 void page_dump(FILE *f
)
1870 unsigned long start
, end
;
1871 int i
, j
, prot
, prot1
;
1874 fprintf(f
, "%-8s %-8s %-8s %s\n",
1875 "start", "end", "size", "prot");
1879 for(i
= 0; i
<= L1_SIZE
; i
++) {
1884 for(j
= 0;j
< L2_SIZE
; j
++) {
1889 if (prot1
!= prot
) {
1890 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1892 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1893 start
, end
, end
- start
,
1894 prot
& PAGE_READ
? 'r' : '-',
1895 prot
& PAGE_WRITE
? 'w' : '-',
1896 prot
& PAGE_EXEC
? 'x' : '-');
1910 int page_get_flags(target_ulong address
)
1914 p
= page_find(address
>> TARGET_PAGE_BITS
);
1920 /* modify the flags of a page and invalidate the code if
1921 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1922 depending on PAGE_WRITE */
1923 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1928 /* mmap_lock should already be held. */
1929 start
= start
& TARGET_PAGE_MASK
;
1930 end
= TARGET_PAGE_ALIGN(end
);
1931 if (flags
& PAGE_WRITE
)
1932 flags
|= PAGE_WRITE_ORG
;
1933 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1934 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1935 /* We may be called for host regions that are outside guest
1939 /* if the write protection is set, then we invalidate the code
1941 if (!(p
->flags
& PAGE_WRITE
) &&
1942 (flags
& PAGE_WRITE
) &&
1944 tb_invalidate_phys_page(addr
, 0, NULL
);
1950 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
1956 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
1957 start
= start
& TARGET_PAGE_MASK
;
1960 /* we've wrapped around */
1962 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1963 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1966 if( !(p
->flags
& PAGE_VALID
) )
1969 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
1971 if (flags
& PAGE_WRITE
) {
1972 if (!(p
->flags
& PAGE_WRITE_ORG
))
1974 /* unprotect the page if it was put read-only because it
1975 contains translated code */
1976 if (!(p
->flags
& PAGE_WRITE
)) {
1977 if (!page_unprotect(addr
, 0, NULL
))
1986 /* called from signal handler: invalidate the code and unprotect the
1987 page. Return TRUE if the fault was succesfully handled. */
1988 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1990 unsigned int page_index
, prot
, pindex
;
1992 target_ulong host_start
, host_end
, addr
;
1994 /* Technically this isn't safe inside a signal handler. However we
1995 know this only ever happens in a synchronous SEGV handler, so in
1996 practice it seems to be ok. */
1999 host_start
= address
& qemu_host_page_mask
;
2000 page_index
= host_start
>> TARGET_PAGE_BITS
;
2001 p1
= page_find(page_index
);
2006 host_end
= host_start
+ qemu_host_page_size
;
2009 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2013 /* if the page was really writable, then we change its
2014 protection back to writable */
2015 if (prot
& PAGE_WRITE_ORG
) {
2016 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2017 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2018 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2019 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2020 p1
[pindex
].flags
|= PAGE_WRITE
;
2021 /* and since the content will be modified, we must invalidate
2022 the corresponding translated code. */
2023 tb_invalidate_phys_page(address
, pc
, puc
);
2024 #ifdef DEBUG_TB_CHECK
2025 tb_invalidate_check(address
);
2035 static inline void tlb_set_dirty(CPUState
*env
,
2036 unsigned long addr
, target_ulong vaddr
)
2039 #endif /* defined(CONFIG_USER_ONLY) */
2041 #if !defined(CONFIG_USER_ONLY)
2042 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2044 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2045 ram_addr_t orig_memory
);
2046 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2049 if (addr > start_addr) \
2052 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2053 if (start_addr2 > 0) \
2057 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2058 end_addr2 = TARGET_PAGE_SIZE - 1; \
2060 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2061 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2066 /* register physical memory. 'size' must be a multiple of the target
2067 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2069 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2071 ram_addr_t phys_offset
)
2073 target_phys_addr_t addr
, end_addr
;
2076 ram_addr_t orig_size
= size
;
2080 /* XXX: should not depend on cpu context */
2082 if (env
->kqemu_enabled
) {
2083 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2086 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2087 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2088 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2089 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2090 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2091 ram_addr_t orig_memory
= p
->phys_offset
;
2092 target_phys_addr_t start_addr2
, end_addr2
;
2093 int need_subpage
= 0;
2095 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2097 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2098 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2099 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2100 &p
->phys_offset
, orig_memory
);
2102 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2105 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2107 p
->phys_offset
= phys_offset
;
2108 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2109 (phys_offset
& IO_MEM_ROMD
))
2110 phys_offset
+= TARGET_PAGE_SIZE
;
2113 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2114 p
->phys_offset
= phys_offset
;
2115 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2116 (phys_offset
& IO_MEM_ROMD
))
2117 phys_offset
+= TARGET_PAGE_SIZE
;
2119 target_phys_addr_t start_addr2
, end_addr2
;
2120 int need_subpage
= 0;
2122 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2123 end_addr2
, need_subpage
);
2125 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2126 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2127 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2128 subpage_register(subpage
, start_addr2
, end_addr2
,
2135 /* since each CPU stores ram addresses in its TLB cache, we must
2136 reset the modified entries */
2138 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2143 /* XXX: temporary until new memory mapping API */
2144 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2148 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2150 return IO_MEM_UNASSIGNED
;
2151 return p
->phys_offset
;
2154 /* XXX: better than nothing */
2155 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2158 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2159 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
"\n",
2160 (uint64_t)size
, (uint64_t)phys_ram_size
);
2163 addr
= phys_ram_alloc_offset
;
2164 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2168 void qemu_ram_free(ram_addr_t addr
)
2172 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2174 #ifdef DEBUG_UNASSIGNED
2175 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2178 do_unassigned_access(addr
, 0, 0, 0);
2180 do_unassigned_access(addr
, 0, 0, 0);
2185 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2187 #ifdef DEBUG_UNASSIGNED
2188 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2191 do_unassigned_access(addr
, 1, 0, 0);
2193 do_unassigned_access(addr
, 1, 0, 0);
2197 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2198 unassigned_mem_readb
,
2199 unassigned_mem_readb
,
2200 unassigned_mem_readb
,
2203 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2204 unassigned_mem_writeb
,
2205 unassigned_mem_writeb
,
2206 unassigned_mem_writeb
,
2209 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2213 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2214 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2215 #if !defined(CONFIG_USER_ONLY)
2216 tb_invalidate_phys_page_fast(ram_addr
, 1);
2217 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2220 stb_p(phys_ram_base
+ ram_addr
, val
);
2222 if (cpu_single_env
->kqemu_enabled
&&
2223 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2224 kqemu_modify_page(cpu_single_env
, ram_addr
);
2226 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2227 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2228 /* we remove the notdirty callback only if the code has been
2230 if (dirty_flags
== 0xff)
2231 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_write_vaddr
);
2234 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2238 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2239 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2240 #if !defined(CONFIG_USER_ONLY)
2241 tb_invalidate_phys_page_fast(ram_addr
, 2);
2242 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2245 stw_p(phys_ram_base
+ ram_addr
, val
);
2247 if (cpu_single_env
->kqemu_enabled
&&
2248 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2249 kqemu_modify_page(cpu_single_env
, ram_addr
);
2251 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2252 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2253 /* we remove the notdirty callback only if the code has been
2255 if (dirty_flags
== 0xff)
2256 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_write_vaddr
);
2259 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2263 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2264 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2265 #if !defined(CONFIG_USER_ONLY)
2266 tb_invalidate_phys_page_fast(ram_addr
, 4);
2267 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2270 stl_p(phys_ram_base
+ ram_addr
, val
);
2272 if (cpu_single_env
->kqemu_enabled
&&
2273 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2274 kqemu_modify_page(cpu_single_env
, ram_addr
);
2276 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2277 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2278 /* we remove the notdirty callback only if the code has been
2280 if (dirty_flags
== 0xff)
2281 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_write_vaddr
);
2284 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2285 NULL
, /* never used */
2286 NULL
, /* never used */
2287 NULL
, /* never used */
2290 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2291 notdirty_mem_writeb
,
2292 notdirty_mem_writew
,
2293 notdirty_mem_writel
,
2296 /* Generate a debug exception if a watchpoint has been hit. */
2297 static void check_watchpoint(int offset
, int flags
)
2299 CPUState
*env
= cpu_single_env
;
2303 vaddr
= (env
->mem_write_vaddr
& TARGET_PAGE_MASK
) + offset
;
2304 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2305 if (vaddr
== env
->watchpoint
[i
].vaddr
2306 && (env
->watchpoint
[i
].type
& flags
)) {
2307 env
->watchpoint_hit
= i
+ 1;
2308 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
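
/* Illustrative sketch, not part of the original file: once a watchpoint is
   armed (via the cpu_watchpoint_insert() helper assumed from elsewhere in
   this file), the TLB entry for the page is redirected to io_mem_watch, so
   every access funnels through the handlers above into check_watchpoint(). */
#if 0
static void demo_arm_write_watch(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, PAGE_WRITE);
    /* a guest store to vaddr's page now takes the watch_mem_write*()
       slow path */
}
#endif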

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
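
/* Illustrative sketch, not part of the original file: the subpage machinery
   is reached through cpu_register_physical_memory() when two regions share
   one target page. The addresses and io types below are invented. */
#if 0
static void demo_split_one_page(int dev_a_iomem, int dev_b_iomem)
{
    /* both halves land in the page at 0x10000000: the core allocates one
       subpage_t via subpage_init(), and subpage_register() routes each
       access to the right device through SUBPAGE_IDX() */
    cpu_register_physical_memory(0x10000000, 0x100, dev_a_iomem);
    cpu_register_physical_memory(0x10000100, 0x100, dev_b_iomem);
}
#endif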

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
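
/* Illustrative sketch, not part of the original file: registering a
   hypothetical byte-wide device. The "demo_*" names are invented; leaving
   the word/dword handlers NULL makes the core tag the zone IO_MEM_SUBWIDTH. */
#if 0
static uint32_t demo_readb(void *opaque, target_phys_addr_t addr)
{
    return 0xff; /* device register read; opaque carries the device state */
}

static void demo_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into the device state carried by opaque */
}

static CPUReadMemoryFunc *demo_read[3] = { demo_readb, NULL, NULL };
static CPUWriteMemoryFunc *demo_write[3] = { demo_writeb, NULL, NULL };

static void demo_register(void)
{
    int iomemtype;
    /* io_index 0 allocates a new zone; the return value is suitable for
       cpu_register_physical_memory() */
    iomemtype = cpu_register_io_memory(0, demo_read, demo_write, NULL);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, iomemtype);
}
#endif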

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
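
/* Illustrative sketch, not part of the original file: copying a guest
   physical range into a host buffer through the slow path above. The
   address is invented; cpu_physical_memory_read() wraps _rw() with
   is_write = 0. */
#if 0
static void demo_peek_guest_ram(void)
{
    uint8_t buf[64];
    cpu_physical_memory_read(0x00100000, buf, sizeof(buf));
}
#endif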

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case: the 64 bit load is split into two 32 bit accesses,
           high word first on big-endian targets */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
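
/* Illustrative sketch, not part of the original file: a gdb-stub style
   helper reading guest-virtual memory through the debug accessor above. */
#if 0
static int demo_read_guest_vaddr(CPUState *env, target_ulong vaddr,
                                 uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}
#endif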

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif