/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define WIN32_LEAN_AND_MEAN

#include <sys/types.h>

#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 40
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (TARGET_PHYS_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
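
/* The page tables below are two-level: the top L1_BITS of a page
   index select a slot in the L1 array, the next L2_BITS select an
   entry in a lazily allocated L2 array, and the remaining
   TARGET_PAGE_BITS address bytes within the page.  */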
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
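
/* subpage_t supports registering distinct I/O handlers at sub-page
   granularity: every byte offset within the page gets its own
   read/write function table and opaque pointer, filled in by
   subpage_register() below.  */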
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
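
/* Page descriptors are looked up with the two-level scheme described
   above: 'index >> L2_BITS' picks the L1 slot and the low L2_BITS
   pick the entry; the _alloc variant creates the L2 table on first
   touch, while the plain variant returns NULL for unmapped indexes.  */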
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
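
/* The jmp_first/jmp_next lists use tagged pointers: the low two bits
   of each link encode which of the two jump slots of the pointing TB
   is involved (0 or 1), and the value 2 marks the list head, which is
   why every list walker below masks with ~3 before dereferencing.  */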
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
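
/* set_bits() marks bits [start, start + len) in a byte-addressed
   bitmap; it is used to build the per-page bitmap of code locations
   consulted by the self-modifying-code fast path.  */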
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
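
/* The fast path below consults the code bitmap so that stores which
   hit a code page but miss the actual translated code avoid a full
   invalidation scan of the page.  */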
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
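
/* The binary search relies on tbs[] being ordered by ascending
   tc_ptr: code_gen_ptr only grows between flushes, so each new TB's
   generated code starts after that of the previous one.  */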
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}
/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
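
/* Unlinking the current TB (tb_reset_jump_recursive below) is what
   makes a running CPU notice the interrupt: chained TBs can loop
   without returning to the main loop, so their direct jumps must be
   broken before interrupt_request is examined again.  */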
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}
*env
, int mask
)
1248 env
->interrupt_request
&= ~mask
;
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
*env
, target_ulong addr
)
1434 TranslationBlock
*tb
;
1436 #if defined(DEBUG_TLB)
1437 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1439 /* must reset current TB so that interrupts cannot modify the
1440 links while we are modifying them */
1441 env
->current_tb
= NULL
;
1443 addr
&= TARGET_PAGE_MASK
;
1444 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1445 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1446 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1447 #if (NB_MMU_MODES >= 3)
1448 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1449 #if (NB_MMU_MODES == 4)
1450 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1454 /* Discard jump cache entries for any tb which might potentially
1455 overlap the flushed page. */
1456 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1457 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1459 i
= tb_jmp_cache_hash_page(addr
);
1460 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1462 #if !defined(CONFIG_SOFTMMU)
1463 if (addr
< MMAP_AREA_END
)
1464 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1467 if (env
->kqemu_enabled
) {
1468 kqemu_flush_page(env
, addr
);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
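
/* Dirty tracking works by aliasing: TLB write entries for clean RAM
   pages are tagged IO_MEM_NOTDIRTY so that stores take the slow path
   (notdirty_mem_write* below), which sets the dirty bits and then
   restores the fast IO_MEM_RAM mapping.  */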
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
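
/* A sub-page is needed whenever a registered region does not start or
   end exactly on a target page boundary: the macro above computes the
   in-page interval [start_addr2, end_addr2] and sets need_subpage
   accordingly.  */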
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %" PRIu64 ")\n",
                size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
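
/* A store to a clean page lands here instead of plain RAM: any
   translated code on the page is invalidated first, and once all the
   dirty bits are set (0xff) the TLB entry reverts to fast RAM writes
   via tlb_set_dirty().  */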
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}
/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
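
/* Note: subpages let a single target page be split among several I/O
   regions: when a mapping's start or size is not page aligned,
   subpage_init() builds the per-page dispatch table and
   subpage_register() fills in one slice of it; accesses are then
   dispatched per SUBPAGE_IDX through the mem_read/mem_write tables.
   A hedged sketch of that flow, with hypothetical offsets and io index:

       ram_addr_t phys = orig_memory;
       subpage_t *s = subpage_init(page_base, &phys, orig_memory);
       subpage_register(s, 0x100, 0x1ff, my_io_index);
*/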

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
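
/* Illustrative usage sketch (not part of the original file): a device
   model registers its byte/word/dword handlers and then maps the
   returned token with cpu_register_physical_memory().  The myio_*
   names, the opaque pointer s and the addresses are hypothetical:

       static CPUReadMemoryFunc *myio_read[3] = {
           myio_readb, myio_readw, myio_readl,
       };
       static CPUWriteMemoryFunc *myio_write[3] = {
           myio_writeb, myio_writew, myio_writel,
       };

       int io = cpu_register_io_memory(0, myio_read, myio_write, s);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);
*/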

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
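
/* Illustrative usage sketch: most callers go through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers,
   which are thin is_write = 0 / is_write = 1 shims around the function
   above; e.g. a DMA-capable device fetching a descriptor from guest
   RAM (hypothetical address):

       uint8_t desc[16];
       cpu_physical_memory_read(0x12340, desc, sizeof(desc));
*/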

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
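
/* Note: ldub_phys/lduw_phys above (and stb_phys/stw_phys/stq_phys
   below) are convenience wrappers over the generic rw path and accept
   any alignment, while ldl_phys/ldq_phys/stl_phys require aligned
   addresses and take the fast path.  Illustrative, with hypothetical
   addresses:

       uint32_t id = ldl_phys(0xfc000000);
       uint32_t b = ldub_phys(0xfc000002);
*/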

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
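
/* Illustrative sketch of the PTE use case named above: a target MMU
   helper that sets an accessed/dirty bit in a guest page table entry
   can use the notdirty store so the update is not mistaken for a guest
   write needing code invalidation.  pte_addr and PG_ACCESSED_MASK are
   hypothetical names here:

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/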

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
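
/* Note: the tswap16()/tswap64() calls above convert between host and
   target byte order, so the st*_phys/ld*_phys family always exchanges
   values in target byte order regardless of the host.  Illustrative
   round trip at a hypothetical RAM address:

       stw_phys(0x1000, 0x1234);
       assert(lduw_phys(0x1000) == 0x1234);
*/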

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
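
/* Illustrative usage sketch: the gdb stub reads guest virtual memory
   through this function, e.g. to fetch a few bytes at some virtual
   address (variable names hypothetical):

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0)
           ... report the unmapped page to the debugger ...
*/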

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
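
/* Note: dump_exec_info() backs the monitor's "info jit" command; any
   printf-like callback with a matching signature can be passed, e.g.
   (illustrative):

       dump_exec_info(stdout, fprintf);
*/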

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif