2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 #include <sys/types.h>
38 //#define DEBUG_TB_INVALIDATE
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
49 #define SMC_BITMAP_USE_THRESHOLD 10
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
54 #if defined(TARGET_SPARC64)
55 #define TARGET_PHYS_ADDR_SPACE_BITS 41
56 #elif defined(TARGET_PPC64)
57 #define TARGET_PHYS_ADDR_SPACE_BITS 42
59 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60 #define TARGET_PHYS_ADDR_SPACE_BITS 32
63 TranslationBlock tbs
[CODE_GEN_MAX_BLOCKS
];
64 TranslationBlock
*tb_hash
[CODE_GEN_HASH_SIZE
];
65 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
67 /* any access to the tbs or the page table must use this lock */
68 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
70 uint8_t code_gen_buffer
[CODE_GEN_BUFFER_SIZE
] __attribute__((aligned (32)));
71 uint8_t *code_gen_ptr
;
75 uint8_t *phys_ram_base
;
76 uint8_t *phys_ram_dirty
;
78 typedef struct PageDesc
{
79 /* list of TBs intersecting this ram page */
80 TranslationBlock
*first_tb
;
81 /* in order to optimize self modifying code, we count the number
82 of lookups we do to a given page to use a bitmap */
83 unsigned int code_write_count
;
85 #if defined(CONFIG_USER_ONLY)
90 typedef struct PhysPageDesc
{
91 /* offset in host memory of the page + io_index in the low 12 bits */
95 /* Note: the VirtPage handling is absolete and will be suppressed
97 typedef struct VirtPageDesc
{
98 /* physical address of code page. It is valid only if 'valid_tag'
99 matches 'virt_valid_tag' */
100 target_ulong phys_addr
;
101 unsigned int valid_tag
;
102 #if !defined(CONFIG_SOFTMMU)
103 /* original page access rights. It is valid only if 'valid_tag'
104 matches 'virt_valid_tag' */
110 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
112 #define L1_SIZE (1 << L1_BITS)
113 #define L2_SIZE (1 << L2_BITS)
115 static void io_mem_init(void);
117 unsigned long qemu_real_host_page_size
;
118 unsigned long qemu_host_page_bits
;
119 unsigned long qemu_host_page_size
;
120 unsigned long qemu_host_page_mask
;
122 /* XXX: for system emulation, it could just be an array */
123 static PageDesc
*l1_map
[L1_SIZE
];
124 PhysPageDesc
**l1_phys_map
;
126 #if !defined(CONFIG_USER_ONLY)
127 #if TARGET_LONG_BITS > 32
128 #define VIRT_L_BITS 9
129 #define VIRT_L_SIZE (1 << VIRT_L_BITS)
130 static void *l1_virt_map
[VIRT_L_SIZE
];
132 static VirtPageDesc
*l1_virt_map
[L1_SIZE
];
134 static unsigned int virt_valid_tag
;
137 /* io memory support */
138 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
139 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
140 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
141 static int io_mem_nb
;
144 char *logfilename
= "/tmp/qemu.log";
149 static int tlb_flush_count
;
150 static int tb_flush_count
;
151 static int tb_phys_invalidate_count
;
153 static void page_init(void)
155 /* NOTE: we can always suppose that qemu_host_page_size >=
159 SYSTEM_INFO system_info
;
162 GetSystemInfo(&system_info
);
163 qemu_real_host_page_size
= system_info
.dwPageSize
;
165 VirtualProtect(code_gen_buffer
, sizeof(code_gen_buffer
),
166 PAGE_EXECUTE_READWRITE
, &old_protect
);
169 qemu_real_host_page_size
= getpagesize();
171 unsigned long start
, end
;
173 start
= (unsigned long)code_gen_buffer
;
174 start
&= ~(qemu_real_host_page_size
- 1);
176 end
= (unsigned long)code_gen_buffer
+ sizeof(code_gen_buffer
);
177 end
+= qemu_real_host_page_size
- 1;
178 end
&= ~(qemu_real_host_page_size
- 1);
180 mprotect((void *)start
, end
- start
,
181 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
185 if (qemu_host_page_size
== 0)
186 qemu_host_page_size
= qemu_real_host_page_size
;
187 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
188 qemu_host_page_size
= TARGET_PAGE_SIZE
;
189 qemu_host_page_bits
= 0;
190 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
191 qemu_host_page_bits
++;
192 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
193 #if !defined(CONFIG_USER_ONLY)
196 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
197 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
/* Look up — and allocate on first touch — the PageDesc for target page
   number 'index' in the two-level l1_map table.
   NOTE(review): this numbered extract is incomplete — the opening brace,
   the local declarations of 'p'/'lp' and the "already allocated" fast
   path are missing from the listing; visible tokens are kept verbatim. */
200 static inline PageDesc
*page_find_alloc(unsigned int index
)
/* lp = address of the level-1 slot that covers this page index */
204 lp
= &l1_map
[index
>> L2_BITS
];
207 /* allocate if not found */
/* allocate and zero-fill one full level-2 table of PageDesc entries */
208 p
= qemu_malloc(sizeof(PageDesc
) * L2_SIZE
;  the allocation size is L2_SIZE descriptors */
) * L2_SIZE
);
209 memset(p
, 0, sizeof(PageDesc
) * L2_SIZE
);
/* index into the level-2 table with the low L2_BITS of the page number */
212 return p
+ (index
& (L2_SIZE
- 1));
/* Non-allocating lookup of the PageDesc for target page number 'index'.
   NOTE(review): the extract drops the opening brace, the declaration of
   'p' and the NULL check on the level-1 slot; tokens below are verbatim. */
215 static inline PageDesc
*page_find(unsigned int index
)
/* level-1 lookup: high bits select the L2 table */
219 p
= l1_map
[index
>> L2_BITS
];
/* level-2 lookup: low L2_BITS select the descriptor inside the table */
222 return p
+ (index
& (L2_SIZE
- 1));
225 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
229 p
= (void **)l1_phys_map
;
230 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
232 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
233 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
235 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
238 /* allocate if not found */
241 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
242 memset(p
, 0, sizeof(void *) * L1_SIZE
);
246 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
249 /* allocate if not found */
252 p
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
253 memset(p
, 0, sizeof(PhysPageDesc
) * L2_SIZE
);
256 return ((PhysPageDesc
*)p
) + (index
& (L2_SIZE
- 1));
259 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
261 return phys_page_find_alloc(index
, 0);
264 #if !defined(CONFIG_USER_ONLY)
265 static void tlb_protect_code(CPUState
*env
, ram_addr_t ram_addr
,
267 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
270 static VirtPageDesc
*virt_page_find_alloc(target_ulong index
, int alloc
)
272 #if TARGET_LONG_BITS > 32
276 lp
= p
+ ((index
>> (5 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
281 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
284 lp
= p
+ ((index
>> (4 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
289 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
292 lp
= p
+ ((index
>> (3 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
297 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
300 lp
= p
+ ((index
>> (2 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
305 p
= qemu_mallocz(sizeof(void *) * VIRT_L_SIZE
);
308 lp
= p
+ ((index
>> (1 * VIRT_L_BITS
)) & (VIRT_L_SIZE
- 1));
313 p
= qemu_mallocz(sizeof(VirtPageDesc
) * VIRT_L_SIZE
);
316 return ((VirtPageDesc
*)p
) + (index
& (VIRT_L_SIZE
- 1));
318 VirtPageDesc
*p
, **lp
;
320 lp
= &l1_virt_map
[index
>> L2_BITS
];
323 /* allocate if not found */
326 p
= qemu_mallocz(sizeof(VirtPageDesc
) * L2_SIZE
);
329 return p
+ (index
& (L2_SIZE
- 1));
333 static inline VirtPageDesc
*virt_page_find(target_ulong index
)
335 return virt_page_find_alloc(index
, 0);
338 #if TARGET_LONG_BITS > 32
339 static void virt_page_flush_internal(void **p
, int level
)
343 VirtPageDesc
*q
= (VirtPageDesc
*)p
;
344 for(i
= 0; i
< VIRT_L_SIZE
; i
++)
348 for(i
= 0; i
< VIRT_L_SIZE
; i
++) {
350 virt_page_flush_internal(p
[i
], level
);
356 static void virt_page_flush(void)
360 if (virt_valid_tag
== 0) {
362 #if TARGET_LONG_BITS > 32
363 virt_page_flush_internal(l1_virt_map
, 5);
368 for(i
= 0; i
< L1_SIZE
; i
++) {
371 for(j
= 0; j
< L2_SIZE
; j
++)
380 static void virt_page_flush(void)
/* One-time initialization of the execution engine.
   NOTE(review): most of this function's body (guard against double init,
   page_init()/io_mem_init() calls, braces) is missing from this extract;
   only the reset of the code generation pointer is visible. */
385 void cpu_exec_init(void)
/* start generating translated code at the beginning of the static buffer */
388 code_gen_ptr
= code_gen_buffer
;
394 static inline void invalidate_page_bitmap(PageDesc
*p
)
396 if (p
->code_bitmap
) {
397 qemu_free(p
->code_bitmap
);
398 p
->code_bitmap
= NULL
;
400 p
->code_write_count
= 0;
403 /* set to NULL all the 'first_tb' fields in all PageDescs */
404 static void page_flush_tb(void)
409 for(i
= 0; i
< L1_SIZE
; i
++) {
412 for(j
= 0; j
< L2_SIZE
; j
++) {
414 invalidate_page_bitmap(p
);
421 /* flush all the translation blocks */
422 /* XXX: tb_flush is currently not thread safe */
/* NOTE(review): incomplete extract — braces, nb_tbs reset and the
   tb_flush_count increment are missing; visible tokens kept verbatim. */
423 void tb_flush(CPUState
*env
)
/* optional debug statistics: bytes of generated code, TB count, mean size */
425 #if defined(DEBUG_FLUSH)
426 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
427 code_gen_ptr
- code_gen_buffer
,
429 nb_tbs
> 0 ? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0);
/* empty both TB hash tables (virtual-pc and physical-pc keyed) */
432 memset (tb_hash
, 0, CODE_GEN_HASH_SIZE
* sizeof (void *));
435 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
/* rewind the code generation buffer so translation restarts from scratch */
438 code_gen_ptr
= code_gen_buffer
;
439 /* XXX: flush processor icache at this point if cache flush is
444 #ifdef DEBUG_TB_CHECK
446 static void tb_invalidate_check(unsigned long address
)
448 TranslationBlock
*tb
;
450 address
&= TARGET_PAGE_MASK
;
451 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
452 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
453 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
454 address
>= tb
->pc
+ tb
->size
)) {
455 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
456 address
, tb
->pc
, tb
->size
);
462 /* verify that all the pages have correct rights for code */
463 static void tb_page_check(void)
465 TranslationBlock
*tb
;
466 int i
, flags1
, flags2
;
468 for(i
= 0;i
< CODE_GEN_HASH_SIZE
; i
++) {
469 for(tb
= tb_hash
[i
]; tb
!= NULL
; tb
= tb
->hash_next
) {
470 flags1
= page_get_flags(tb
->pc
);
471 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
472 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
473 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
474 tb
->pc
, tb
->size
, flags1
, flags2
);
480 void tb_jmp_check(TranslationBlock
*tb
)
482 TranslationBlock
*tb1
;
485 /* suppress any remaining jumps to this TB */
489 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
492 tb1
= tb1
->jmp_next
[n1
];
494 /* check end of list */
496 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
502 /* invalidate one TB */
503 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
506 TranslationBlock
*tb1
;
510 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
513 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
517 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
519 TranslationBlock
*tb1
;
525 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
527 *ptb
= tb1
->page_next
[n1
];
530 ptb
= &tb1
->page_next
[n1
];
534 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
536 TranslationBlock
*tb1
, **ptb
;
539 ptb
= &tb
->jmp_next
[n
];
542 /* find tb(n) in circular list */
546 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
547 if (n1
== n
&& tb1
== tb
)
550 ptb
= &tb1
->jmp_first
;
552 ptb
= &tb1
->jmp_next
[n1
];
555 /* now we can suppress tb(n) from the list */
556 *ptb
= tb
->jmp_next
[n
];
558 tb
->jmp_next
[n
] = NULL
;
562 /* reset the jump entry 'n' of a TB so that it is not chained to
564 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
566 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
569 static inline void tb_invalidate(TranslationBlock
*tb
)
572 TranslationBlock
*tb1
, *tb2
, **ptb
;
574 tb_invalidated_flag
= 1;
576 /* remove the TB from the hash list */
577 h
= tb_hash_func(tb
->pc
);
581 /* NOTE: the TB is not necessarily linked in the hash. It
582 indicates that it is not currently used */
586 *ptb
= tb1
->hash_next
;
589 ptb
= &tb1
->hash_next
;
592 /* suppress this TB from the two jump lists */
593 tb_jmp_remove(tb
, 0);
594 tb_jmp_remove(tb
, 1);
596 /* suppress any remaining jumps to this TB */
602 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
603 tb2
= tb1
->jmp_next
[n1
];
604 tb_reset_jump(tb1
, n1
);
605 tb1
->jmp_next
[n1
] = NULL
;
608 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
/* Invalidate one TB keyed by physical address: unlink it from the
   physical hash table and from the per-page TB lists.
   NOTE(review): incomplete extract — declarations of 'p'/'h', the virtual
   hash/jump unlinking and several braces are missing; tokens verbatim. */
611 static inline void tb_phys_invalidate(TranslationBlock
*tb
, unsigned int page_addr
)
615 target_ulong phys_pc
;
617 /* remove the TB from the hash list */
/* physical pc = page base of the TB's first page + offset of pc in page */
618 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
619 h
= tb_phys_hash_func(phys_pc
);
620 tb_remove(&tb_phys_hash
[h
], tb
,
621 offsetof(TranslationBlock
, phys_hash_next
));
623 /* remove the TB from the page list */
/* skip the page the caller is already handling (page_addr) */
624 if (tb
->page_addr
[0] != page_addr
) {
625 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
626 tb_page_remove(&p
->first_tb
, tb
);
627 invalidate_page_bitmap(p
);
/* second page is only valid when the TB spans two pages (!= -1) */
629 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
630 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
631 tb_page_remove(&p
->first_tb
, tb
);
632 invalidate_page_bitmap(p
);
/* statistics counter read by debug/monitor code */
636 tb_phys_invalidate_count
++;
/* Set 'len' consecutive bits starting at bit 'start' in bitmap 'tab'
   (used to mark code ranges in a page's SMC bitmap).
   NOTE(review): incomplete extract — declarations of 'end'/'end1'/'mask',
   the actual tab[] |= stores and loop bodies are missing; the visible
   mask arithmetic handles the partial leading byte, the aligned middle
   bytes and the partial trailing byte. Tokens kept verbatim. */
639 static inline void set_bits(uint8_t *tab
, int start
, int len
)
/* leading partial byte: bits from (start & 7) upward */
645 mask
= 0xff << (start
& 7);
/* if start and end fall in the same byte, trim the mask at 'end' too */
646 if ((start
& ~7) == (end
& ~7)) {
648 mask
&= ~(0xff << (end
& 7));
/* advance to the next byte boundary */
653 start
= (start
+ 8) & ~7;
/* whole bytes between start and end1 */
655 while (start
< end1
) {
/* trailing partial byte: bits below (end & 7) */
660 mask
= ~(0xff << (end
& 7));
666 static void build_page_bitmap(PageDesc
*p
)
668 int n
, tb_start
, tb_end
;
669 TranslationBlock
*tb
;
671 p
->code_bitmap
= qemu_malloc(TARGET_PAGE_SIZE
/ 8);
674 memset(p
->code_bitmap
, 0, TARGET_PAGE_SIZE
/ 8);
679 tb
= (TranslationBlock
*)((long)tb
& ~3);
680 /* NOTE: this is subtle as a TB may span two physical pages */
682 /* NOTE: tb_end may be after the end of the page, but
683 it is not a problem */
684 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
685 tb_end
= tb_start
+ tb
->size
;
686 if (tb_end
> TARGET_PAGE_SIZE
)
687 tb_end
= TARGET_PAGE_SIZE
;
690 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
692 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
693 tb
= tb
->page_next
[n
];
697 #ifdef TARGET_HAS_PRECISE_SMC
699 static void tb_gen_code(CPUState
*env
,
700 target_ulong pc
, target_ulong cs_base
, int flags
,
703 TranslationBlock
*tb
;
705 target_ulong phys_pc
, phys_page2
, virt_page2
;
708 phys_pc
= get_phys_addr_code(env
, pc
);
711 /* flush must be done */
713 /* cannot fail at this point */
716 tc_ptr
= code_gen_ptr
;
718 tb
->cs_base
= cs_base
;
721 cpu_gen_code(env
, tb
, CODE_GEN_MAX_SIZE
, &code_gen_size
);
722 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
724 /* check next page if needed */
725 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
727 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
728 phys_page2
= get_phys_addr_code(env
, virt_page2
);
730 tb_link_phys(tb
, phys_pc
, phys_page2
);
734 /* invalidate all TBs which intersect with the target physical page
735 starting in range [start;end[. NOTE: start and end must refer to
736 the same physical page. 'is_cpu_write_access' should be true if called
737 from a real cpu write access: the virtual CPU will exit the current
738 TB if code is modified inside this TB. */
739 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
740 int is_cpu_write_access
)
742 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
743 CPUState
*env
= cpu_single_env
;
745 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
746 target_ulong tb_start
, tb_end
;
747 target_ulong current_pc
, current_cs_base
;
749 p
= page_find(start
>> TARGET_PAGE_BITS
);
752 if (!p
->code_bitmap
&&
753 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
754 is_cpu_write_access
) {
755 /* build code bitmap */
756 build_page_bitmap(p
);
759 /* we remove all the TBs in the range [start, end[ */
760 /* XXX: see if in some cases it could be faster to invalidate all the code */
761 current_tb_not_found
= is_cpu_write_access
;
762 current_tb_modified
= 0;
763 current_tb
= NULL
; /* avoid warning */
764 current_pc
= 0; /* avoid warning */
765 current_cs_base
= 0; /* avoid warning */
766 current_flags
= 0; /* avoid warning */
770 tb
= (TranslationBlock
*)((long)tb
& ~3);
771 tb_next
= tb
->page_next
[n
];
772 /* NOTE: this is subtle as a TB may span two physical pages */
774 /* NOTE: tb_end may be after the end of the page, but
775 it is not a problem */
776 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
777 tb_end
= tb_start
+ tb
->size
;
779 tb_start
= tb
->page_addr
[1];
780 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
782 if (!(tb_end
<= start
|| tb_start
>= end
)) {
783 #ifdef TARGET_HAS_PRECISE_SMC
784 if (current_tb_not_found
) {
785 current_tb_not_found
= 0;
787 if (env
->mem_write_pc
) {
788 /* now we have a real cpu fault */
789 current_tb
= tb_find_pc(env
->mem_write_pc
);
792 if (current_tb
== tb
&&
793 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
800 current_tb_modified
= 1;
801 cpu_restore_state(current_tb
, env
,
802 env
->mem_write_pc
, NULL
);
803 #if defined(TARGET_I386)
804 current_flags
= env
->hflags
;
805 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
806 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
807 current_pc
= current_cs_base
+ env
->eip
;
809 #error unsupported CPU
812 #endif /* TARGET_HAS_PRECISE_SMC */
813 saved_tb
= env
->current_tb
;
814 env
->current_tb
= NULL
;
815 tb_phys_invalidate(tb
, -1);
816 env
->current_tb
= saved_tb
;
817 if (env
->interrupt_request
&& env
->current_tb
)
818 cpu_interrupt(env
, env
->interrupt_request
);
822 #if !defined(CONFIG_USER_ONLY)
823 /* if no code remaining, no need to continue to use slow writes */
825 invalidate_page_bitmap(p
);
826 if (is_cpu_write_access
) {
827 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
831 #ifdef TARGET_HAS_PRECISE_SMC
832 if (current_tb_modified
) {
833 /* we generate a block containing just the instruction
834 modifying the memory. It will ensure that it cannot modify
836 env
->current_tb
= NULL
;
837 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
839 cpu_resume_from_signal(env
, NULL
);
844 /* len must be <= 8 and start must be a multiple of len */
845 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
852 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
853 cpu_single_env
->mem_write_vaddr
, len
,
855 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
859 p
= page_find(start
>> TARGET_PAGE_BITS
);
862 if (p
->code_bitmap
) {
863 offset
= start
& ~TARGET_PAGE_MASK
;
864 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
865 if (b
& ((1 << len
) - 1))
869 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
873 #if !defined(CONFIG_SOFTMMU)
874 static void tb_invalidate_phys_page(target_ulong addr
,
875 unsigned long pc
, void *puc
)
877 int n
, current_flags
, current_tb_modified
;
878 target_ulong current_pc
, current_cs_base
;
880 TranslationBlock
*tb
, *current_tb
;
881 #ifdef TARGET_HAS_PRECISE_SMC
882 CPUState
*env
= cpu_single_env
;
885 addr
&= TARGET_PAGE_MASK
;
886 p
= page_find(addr
>> TARGET_PAGE_BITS
);
890 current_tb_modified
= 0;
892 current_pc
= 0; /* avoid warning */
893 current_cs_base
= 0; /* avoid warning */
894 current_flags
= 0; /* avoid warning */
895 #ifdef TARGET_HAS_PRECISE_SMC
897 current_tb
= tb_find_pc(pc
);
902 tb
= (TranslationBlock
*)((long)tb
& ~3);
903 #ifdef TARGET_HAS_PRECISE_SMC
904 if (current_tb
== tb
&&
905 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
906 /* If we are modifying the current TB, we must stop
907 its execution. We could be more precise by checking
908 that the modification is after the current PC, but it
909 would require a specialized function to partially
910 restore the CPU state */
912 current_tb_modified
= 1;
913 cpu_restore_state(current_tb
, env
, pc
, puc
);
914 #if defined(TARGET_I386)
915 current_flags
= env
->hflags
;
916 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
917 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
918 current_pc
= current_cs_base
+ env
->eip
;
920 #error unsupported CPU
923 #endif /* TARGET_HAS_PRECISE_SMC */
924 tb_phys_invalidate(tb
, addr
);
925 tb
= tb
->page_next
[n
];
928 #ifdef TARGET_HAS_PRECISE_SMC
929 if (current_tb_modified
) {
930 /* we generate a block containing just the instruction
931 modifying the memory. It will ensure that it cannot modify
933 env
->current_tb
= NULL
;
934 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
936 cpu_resume_from_signal(env
, puc
);
942 /* add the tb in the target page and protect it if necessary */
943 static inline void tb_alloc_page(TranslationBlock
*tb
,
944 unsigned int n
, unsigned int page_addr
)
947 TranslationBlock
*last_first_tb
;
949 tb
->page_addr
[n
] = page_addr
;
950 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
951 tb
->page_next
[n
] = p
->first_tb
;
952 last_first_tb
= p
->first_tb
;
953 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
954 invalidate_page_bitmap(p
);
956 #if defined(TARGET_HAS_SMC) || 1
958 #if defined(CONFIG_USER_ONLY)
959 if (p
->flags
& PAGE_WRITE
) {
960 unsigned long host_start
, host_end
, addr
;
963 /* force the host page as non writable (writes will have a
964 page fault + mprotect overhead) */
965 host_start
= page_addr
& qemu_host_page_mask
;
966 host_end
= host_start
+ qemu_host_page_size
;
968 for(addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
)
969 prot
|= page_get_flags(addr
);
970 mprotect((void *)host_start
, qemu_host_page_size
,
971 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
972 #ifdef DEBUG_TB_INVALIDATE
973 printf("protecting code page: 0x%08lx\n",
976 p
->flags
&= ~PAGE_WRITE
;
979 /* if some code is already present, then the pages are already
980 protected. So we handle the case where only the first TB is
981 allocated in a physical page */
982 if (!last_first_tb
) {
983 target_ulong virt_addr
;
985 virt_addr
= (tb
->pc
& TARGET_PAGE_MASK
) + (n
<< TARGET_PAGE_BITS
);
986 tlb_protect_code(cpu_single_env
, page_addr
, virt_addr
);
990 #endif /* TARGET_HAS_SMC */
993 /* Allocate a new translation block. Flush the translation buffer if
994 too many translation blocks or too much generated code. */
/* NOTE(review): incomplete extract — the NULL-return path taken when the
   limits below are exceeded, the tb->pc/cflags initialization and the
   final return are missing from the listing; tokens kept verbatim. */
995 TranslationBlock
*tb_alloc(target_ulong pc
)
997 TranslationBlock
*tb
;
/* out of TB slots, or the code buffer is nearly full */
999 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
1000 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
/* hand out the next slot of the static tbs[] array */
1002 tb
= &tbs
[nb_tbs
++];
1008 /* add a new TB and link it to the physical page tables. phys_page2 is
1009 (-1) to indicate that only one page contains the TB. */
1010 void tb_link_phys(TranslationBlock
*tb
,
1011 target_ulong phys_pc
, target_ulong phys_page2
)
1014 TranslationBlock
**ptb
;
1016 /* add in the physical hash table */
1017 h
= tb_phys_hash_func(phys_pc
);
1018 ptb
= &tb_phys_hash
[h
];
1019 tb
->phys_hash_next
= *ptb
;
1022 /* add in the page list */
1023 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1024 if (phys_page2
!= -1)
1025 tb_alloc_page(tb
, 1, phys_page2
);
1027 tb
->page_addr
[1] = -1;
1028 #ifdef DEBUG_TB_CHECK
1033 /* link the tb with the other TBs */
1034 void tb_link(TranslationBlock
*tb
)
1036 #if !defined(CONFIG_USER_ONLY)
1041 /* save the code memory mappings (needed to invalidate the code) */
1042 addr
= tb
->pc
& TARGET_PAGE_MASK
;
1043 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1044 #ifdef DEBUG_TLB_CHECK
1045 if (vp
->valid_tag
== virt_valid_tag
&&
1046 vp
->phys_addr
!= tb
->page_addr
[0]) {
1047 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1048 addr
, tb
->page_addr
[0], vp
->phys_addr
);
1051 vp
->phys_addr
= tb
->page_addr
[0];
1052 if (vp
->valid_tag
!= virt_valid_tag
) {
1053 vp
->valid_tag
= virt_valid_tag
;
1054 #if !defined(CONFIG_SOFTMMU)
1059 if (tb
->page_addr
[1] != -1) {
1060 addr
+= TARGET_PAGE_SIZE
;
1061 vp
= virt_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1062 #ifdef DEBUG_TLB_CHECK
1063 if (vp
->valid_tag
== virt_valid_tag
&&
1064 vp
->phys_addr
!= tb
->page_addr
[1]) {
1065 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1066 addr
, tb
->page_addr
[1], vp
->phys_addr
);
1069 vp
->phys_addr
= tb
->page_addr
[1];
1070 if (vp
->valid_tag
!= virt_valid_tag
) {
1071 vp
->valid_tag
= virt_valid_tag
;
1072 #if !defined(CONFIG_SOFTMMU)
1080 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1081 tb
->jmp_next
[0] = NULL
;
1082 tb
->jmp_next
[1] = NULL
;
1083 #ifdef USE_CODE_COPY
1084 tb
->cflags
&= ~CF_FP_USED
;
1085 if (tb
->cflags
& CF_TB_FP_USED
)
1086 tb
->cflags
|= CF_FP_USED
;
1089 /* init original jump addresses */
1090 if (tb
->tb_next_offset
[0] != 0xffff)
1091 tb_reset_jump(tb
, 0);
1092 if (tb
->tb_next_offset
[1] != 0xffff)
1093 tb_reset_jump(tb
, 1);
1096 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1097 tb[1].tc_ptr. Return NULL if not found */
/* NOTE(review): incomplete extract — the early "return NULL", the
   m_min/m_max initialization, and the branch bodies that narrow the
   search interval or return the match are missing; tokens verbatim.
   tbs[] entries are allocated in code-buffer order (tb_alloc advances
   code_gen_ptr monotonically), which is what makes the bisection valid. */
1098 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1100 int m_min
, m_max
, m
;
1102 TranslationBlock
*tb
;
/* reject pointers outside the generated-code buffer */
1106 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1107 tc_ptr
>= (unsigned long)code_gen_ptr
)
1109 /* binary search (cf Knuth) */
1112 while (m_min
<= m_max
) {
1113 m
= (m_min
+ m_max
) >> 1;
/* v = start of generated code for the probed TB */
1115 v
= (unsigned long)tb
->tc_ptr
;
1118 else if (tc_ptr
< v
) {
1127 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1129 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1131 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1134 tb1
= tb
->jmp_next
[n
];
1136 /* find head of list */
1139 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1142 tb1
= tb1
->jmp_next
[n1
];
1144 /* we are now sure now that tb jumps to tb1 */
1147 /* remove tb from the jmp_first list */
1148 ptb
= &tb_next
->jmp_first
;
1152 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1153 if (n1
== n
&& tb1
== tb
)
1155 ptb
= &tb1
->jmp_next
[n1
];
1157 *ptb
= tb
->jmp_next
[n
];
1158 tb
->jmp_next
[n
] = NULL
;
1160 /* suppress the jump to next tb in generated code */
1161 tb_reset_jump(tb
, n
);
1163 /* suppress jumps in the tb on which we could have jumped */
1164 tb_reset_jump_recursive(tb_next
);
1168 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1170 tb_reset_jump_recursive2(tb
, 0);
1171 tb_reset_jump_recursive2(tb
, 1);
/* Invalidate any translated code that contains the breakpoint address:
   translate the virtual pc to a physical address via the debug MMU walk,
   then invalidate the 1-byte physical range so the TB is regenerated
   (with the breakpoint check) on next execution.
   NOTE(review): only compiled for targets with in-circuit-emulation
   support; opening/closing braces are missing from this extract. */
1174 #if defined(TARGET_HAS_ICE)
1175 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1177 target_ulong phys_addr
;
1179 phys_addr
= cpu_get_phys_page_debug(env
, pc
);
1180 tb_invalidate_phys_page_range(phys_addr
, phys_addr
+ 1, 0);
1184 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1185 breakpoint is reached */
/* NOTE(review): incomplete extract — the declaration of 'i', the
   "already present" / "table full" return statements, the final
   "return 0" and the #else branch returning an error for targets
   without ICE support are missing; tokens kept verbatim. */
1186 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1188 #if defined(TARGET_HAS_ICE)
/* refuse duplicates: scan the existing breakpoint table */
1191 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1192 if (env
->breakpoints
[i
] == pc
)
/* bounded table: fail when MAX_BREAKPOINTS already set */
1196 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
/* append the new breakpoint */
1198 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
/* force retranslation of code containing pc so the check takes effect */
1200 breakpoint_invalidate(env
, pc
);
1207 /* remove a breakpoint */
/* NOTE(review): incomplete extract — declaration of 'i', the "not found"
   error return, the loop-exit 'found' break, the final "return 0" and the
   non-ICE #else branch are missing; tokens kept verbatim. */
1208 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1210 #if defined(TARGET_HAS_ICE)
/* locate the breakpoint in the table */
1212 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1213 if (env
->breakpoints
[i
] == pc
)
/* remove by swapping the last entry into the vacated slot (order is
   not significant) and shrinking the count */
1218 env
->nb_breakpoints
--;
1219 if (i
< env
->nb_breakpoints
)
1220 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
/* retranslate the affected code so the breakpoint check is dropped */
1222 breakpoint_invalidate(env
, pc
);
1229 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1230 CPU loop after each instruction */
/* NOTE(review): incomplete extract — braces and the tb_flush(env) call
   implied by the comments below are missing; tokens kept verbatim.
   No-op on targets without ICE support. */
1231 void cpu_single_step(CPUState
*env
, int enabled
)
1233 #if defined(TARGET_HAS_ICE)
/* only act on an actual state change */
1234 if (env
->singlestep_enabled
!= enabled
) {
1235 env
->singlestep_enabled
= enabled
;
1236 /* must flush all the translated code to avoid inconsistencies */
1237 /* XXX: only flush what is necessary */
1243 /* enable or disable low levels log */
/* NOTE(review): incomplete extract — braces, the exit() on fopen failure
   (perror path) and the #else around the two setvbuf variants are
   missing; tokens kept verbatim. */
1244 void cpu_set_log(int log_flags
)
/* publish the new log mask */
1246 loglevel
= log_flags
;
/* lazily open the log file the first time logging is enabled */
1247 if (loglevel
&& !logfile
) {
1248 logfile
= fopen(logfilename
, "w");
1250 perror(logfilename
);
1253 #if !defined(CONFIG_SOFTMMU)
1254 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1256 static uint8_t logfile_buf
[4096];
/* line-buffered with a caller-supplied static buffer */
1257 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
/* line-buffered with a stdio-allocated buffer */
1260 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1265 void cpu_set_log_filename(const char *filename
)
1267 logfilename
= strdup(filename
);
1270 /* mask must never be zero, except for A20 change call */
/* NOTE(review): incomplete extract — braces and the release of
   interrupt_lock after the unlink are missing; tokens kept verbatim. */
1271 void cpu_interrupt(CPUState
*env
, int mask
)
1273 TranslationBlock
*tb
;
/* single-owner busy flag shared by all callers (see testandset below) */
1274 static int interrupt_lock
;
/* record the pending interrupt source(s) */
1276 env
->interrupt_request
|= mask
;
1277 /* if the cpu is currently executing code, we must unlink it and
1278 all the potentially executing TB */
1279 tb
= env
->current_tb
;
/* testandset guards against two threads unlinking concurrently */
1280 if (tb
&& !testandset(&interrupt_lock
)) {
1281 env
->current_tb
= NULL
;
/* break the direct-jump chains so the CPU loop regains control */
1282 tb_reset_jump_recursive(tb
);
1287 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1289 env
->interrupt_request
&= ~mask
;
1292 CPULogItem cpu_log_items
[] = {
1293 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1294 "show generated host assembly code for each compiled TB" },
1295 { CPU_LOG_TB_IN_ASM
, "in_asm",
1296 "show target assembly code for each compiled TB" },
1297 { CPU_LOG_TB_OP
, "op",
1298 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1300 { CPU_LOG_TB_OP_OPT
, "op_opt",
1301 "show micro ops after optimization for each compiled TB" },
1303 { CPU_LOG_INT
, "int",
1304 "show interrupts/exceptions in short format" },
1305 { CPU_LOG_EXEC
, "exec",
1306 "show trace before each executed TB (lots of logs)" },
1307 { CPU_LOG_TB_CPU
, "cpu",
1308 "show CPU state before bloc translation" },
1310 { CPU_LOG_PCALL
, "pcall",
1311 "show protected mode far calls/returns/exceptions" },
1314 { CPU_LOG_IOPORT
, "ioport",
1315 "show all i/o ports accesses" },
/* Compare the first 'n' characters of 's1' against the NUL-terminated
   string 's2'. Returns 1 when 's2' has length exactly 'n' and matches
   's1' over those n bytes, 0 otherwise. Used by cpu_str_to_log_mask()
   to match comma-separated log-item names that are not NUL-terminated.
   Fix: the extracted text dropped the short-circuit "return 0;" for the
   length-mismatch case, leaving a control path without a return value
   in a non-void function. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1327 /* takes a comma separated list of log masks. Return 0 if error. */
1328 int cpu_str_to_log_mask(const char *str
)
1337 p1
= strchr(p
, ',');
1340 if(cmp1(p
,p1
-p
,"all")) {
1341 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1345 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1346 if (cmp1(p
, p1
- p
, item
->name
))
1360 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1365 fprintf(stderr
, "qemu: fatal: ");
1366 vfprintf(stderr
, fmt
, ap
);
1367 fprintf(stderr
, "\n");
1369 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1371 cpu_dump_state(env
, stderr
, fprintf
, 0);
1377 #if !defined(CONFIG_USER_ONLY)
1379 /* NOTE: if flush_global is true, also flush global entries (not
1381 void tlb_flush(CPUState
*env
, int flush_global
)
1385 #if defined(DEBUG_TLB)
1386 printf("tlb_flush:\n");
1388 /* must reset current TB so that interrupts cannot modify the
1389 links while we are modifying them */
1390 env
->current_tb
= NULL
;
1392 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1393 env
->tlb_read
[0][i
].address
= -1;
1394 env
->tlb_write
[0][i
].address
= -1;
1395 env
->tlb_read
[1][i
].address
= -1;
1396 env
->tlb_write
[1][i
].address
= -1;
1400 memset (tb_hash
, 0, CODE_GEN_HASH_SIZE
* sizeof (void *));
1402 #if !defined(CONFIG_SOFTMMU)
1403 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1406 if (env
->kqemu_enabled
) {
1407 kqemu_flush(env
, flush_global
);
1413 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1415 if (addr
== (tlb_entry
->address
&
1416 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)))
1417 tlb_entry
->address
= -1;
1420 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1425 TranslationBlock
*tb
;
1427 #if defined(DEBUG_TLB)
1428 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1430 /* must reset current TB so that interrupts cannot modify the
1431 links while we are modifying them */
1432 env
->current_tb
= NULL
;
1434 addr
&= TARGET_PAGE_MASK
;
1435 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1436 tlb_flush_entry(&env
->tlb_read
[0][i
], addr
);
1437 tlb_flush_entry(&env
->tlb_write
[0][i
], addr
);
1438 tlb_flush_entry(&env
->tlb_read
[1][i
], addr
);
1439 tlb_flush_entry(&env
->tlb_write
[1][i
], addr
);
1441 /* remove from the virtual pc hash table all the TB at this
1444 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1445 if (vp
&& vp
->valid_tag
== virt_valid_tag
) {
1446 p
= page_find(vp
->phys_addr
>> TARGET_PAGE_BITS
);
1448 /* we remove all the links to the TBs in this virtual page */
1450 while (tb
!= NULL
) {
1452 tb
= (TranslationBlock
*)((long)tb
& ~3);
1453 if ((tb
->pc
& TARGET_PAGE_MASK
) == addr
||
1454 ((tb
->pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
) == addr
) {
1457 tb
= tb
->page_next
[n
];
1463 #if !defined(CONFIG_SOFTMMU)
1464 if (addr
< MMAP_AREA_END
)
1465 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1468 if (env
->kqemu_enabled
) {
1469 kqemu_flush_page(env
, addr
);
1474 static inline void tlb_protect_code1(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1476 if (addr
== (tlb_entry
->address
&
1477 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) &&
1478 (tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1479 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1483 /* update the TLBs so that writes to code in the virtual page 'addr'
1485 static void tlb_protect_code(CPUState
*env
, ram_addr_t ram_addr
,
1490 vaddr
&= TARGET_PAGE_MASK
;
1491 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1492 tlb_protect_code1(&env
->tlb_write
[0][i
], vaddr
);
1493 tlb_protect_code1(&env
->tlb_write
[1][i
], vaddr
);
1496 if (env
->kqemu_enabled
) {
1497 kqemu_set_notdirty(env
, ram_addr
);
1500 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] &= ~CODE_DIRTY_FLAG
;
1502 #if !defined(CONFIG_SOFTMMU)
1503 /* NOTE: as we generated the code for this page, it is already at
1505 if (vaddr
< MMAP_AREA_END
)
1506 mprotect((void *)vaddr
, TARGET_PAGE_SIZE
, PROT_READ
);
1510 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1511 tested for self modifying code */
1512 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1515 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1518 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1519 unsigned long start
, unsigned long length
)
1522 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1523 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1524 if ((addr
- start
) < length
) {
1525 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1530 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1534 unsigned long length
, start1
;
1538 start
&= TARGET_PAGE_MASK
;
1539 end
= TARGET_PAGE_ALIGN(end
);
1541 length
= end
- start
;
1544 len
= length
>> TARGET_PAGE_BITS
;
1545 env
= cpu_single_env
;
1547 if (env
->kqemu_enabled
) {
1550 for(i
= 0; i
< len
; i
++) {
1551 kqemu_set_notdirty(env
, addr
);
1552 addr
+= TARGET_PAGE_SIZE
;
1556 mask
= ~dirty_flags
;
1557 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1558 for(i
= 0; i
< len
; i
++)
1561 /* we modify the TLB cache so that the dirty bit will be set again
1562 when accessing the range */
1563 start1
= start
+ (unsigned long)phys_ram_base
;
1564 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1565 tlb_reset_dirty_range(&env
->tlb_write
[0][i
], start1
, length
);
1566 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1567 tlb_reset_dirty_range(&env
->tlb_write
[1][i
], start1
, length
);
1569 #if !defined(CONFIG_SOFTMMU)
1570 /* XXX: this is expensive */
1576 for(i
= 0; i
< L1_SIZE
; i
++) {
1579 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1580 for(j
= 0; j
< L2_SIZE
; j
++) {
1581 if (p
->valid_tag
== virt_valid_tag
&&
1582 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1583 (p
->prot
& PROT_WRITE
)) {
1584 if (addr
< MMAP_AREA_END
) {
1585 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1586 p
->prot
& ~PROT_WRITE
);
1589 addr
+= TARGET_PAGE_SIZE
;
1598 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1600 ram_addr_t ram_addr
;
1602 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1603 ram_addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) +
1604 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1605 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1606 tlb_entry
->address
|= IO_MEM_NOTDIRTY
;
1611 /* update the TLB according to the current state of the dirty bits */
1612 void cpu_tlb_update_dirty(CPUState
*env
)
1615 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1616 tlb_update_dirty(&env
->tlb_write
[0][i
]);
1617 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1618 tlb_update_dirty(&env
->tlb_write
[1][i
]);
1621 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1622 unsigned long start
)
1625 if ((tlb_entry
->address
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1626 addr
= (tlb_entry
->address
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1627 if (addr
== start
) {
1628 tlb_entry
->address
= (tlb_entry
->address
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1633 /* update the TLB corresponding to virtual page vaddr and phys addr
1634 addr so that it is no longer dirty */
1635 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1637 CPUState
*env
= cpu_single_env
;
1640 addr
&= TARGET_PAGE_MASK
;
1641 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1642 tlb_set_dirty1(&env
->tlb_write
[0][i
], addr
);
1643 tlb_set_dirty1(&env
->tlb_write
[1][i
], addr
);
1646 /* add a new TLB entry. At most one entry for a given virtual address
1647 is permitted. Return 0 if OK or 2 if the page could not be mapped
1648 (can only happen in non SOFTMMU mode for I/O pages or pages
1649 conflicting with the host address space). */
1650 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1651 target_phys_addr_t paddr
, int prot
,
1652 int is_user
, int is_softmmu
)
1657 target_ulong address
;
1658 target_phys_addr_t addend
;
1661 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1663 pd
= IO_MEM_UNASSIGNED
;
1665 pd
= p
->phys_offset
;
1667 #if defined(DEBUG_TLB)
1668 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1669 vaddr
, paddr
, prot
, is_user
, is_softmmu
, pd
);
1673 #if !defined(CONFIG_SOFTMMU)
1677 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1678 /* IO memory case */
1679 address
= vaddr
| pd
;
1682 /* standard memory */
1684 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1687 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1689 if (prot
& PAGE_READ
) {
1690 env
->tlb_read
[is_user
][index
].address
= address
;
1691 env
->tlb_read
[is_user
][index
].addend
= addend
;
1693 env
->tlb_read
[is_user
][index
].address
= -1;
1694 env
->tlb_read
[is_user
][index
].addend
= -1;
1696 if (prot
& PAGE_WRITE
) {
1697 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
) {
1698 /* ROM: access is ignored (same as unassigned) */
1699 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_ROM
;
1700 env
->tlb_write
[is_user
][index
].addend
= addend
;
1701 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1702 !cpu_physical_memory_is_dirty(pd
)) {
1703 env
->tlb_write
[is_user
][index
].address
= vaddr
| IO_MEM_NOTDIRTY
;
1704 env
->tlb_write
[is_user
][index
].addend
= addend
;
1706 env
->tlb_write
[is_user
][index
].address
= address
;
1707 env
->tlb_write
[is_user
][index
].addend
= addend
;
1710 env
->tlb_write
[is_user
][index
].address
= -1;
1711 env
->tlb_write
[is_user
][index
].addend
= -1;
1714 #if !defined(CONFIG_SOFTMMU)
1716 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1717 /* IO access: no mapping is done as it will be handled by the
1719 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1724 if (vaddr
>= MMAP_AREA_END
) {
1727 if (prot
& PROT_WRITE
) {
1728 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1729 #if defined(TARGET_HAS_SMC) || 1
1732 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1733 !cpu_physical_memory_is_dirty(pd
))) {
1734 /* ROM: we do as if code was inside */
1735 /* if code is present, we only map as read only and save the
1739 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1742 vp
->valid_tag
= virt_valid_tag
;
1743 prot
&= ~PAGE_WRITE
;
1746 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1747 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1748 if (map_addr
== MAP_FAILED
) {
1749 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1759 /* called from signal handler: invalidate the code and unprotect the
1760 page. Return TRUE if the fault was succesfully handled. */
1761 int page_unprotect(unsigned long addr
, unsigned long pc
, void *puc
)
1763 #if !defined(CONFIG_SOFTMMU)
1766 #if defined(DEBUG_TLB)
1767 printf("page_unprotect: addr=0x%08x\n", addr
);
1769 addr
&= TARGET_PAGE_MASK
;
1771 /* if it is not mapped, no need to worry here */
1772 if (addr
>= MMAP_AREA_END
)
1774 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1777 /* NOTE: in this case, validate_tag is _not_ tested as it
1778 validates only the code TLB */
1779 if (vp
->valid_tag
!= virt_valid_tag
)
1781 if (!(vp
->prot
& PAGE_WRITE
))
1783 #if defined(DEBUG_TLB)
1784 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1785 addr
, vp
->phys_addr
, vp
->prot
);
1787 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1788 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1789 (unsigned long)addr
, vp
->prot
);
1790 /* set the dirty bit */
1791 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1792 /* flush the code inside */
1793 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1802 void tlb_flush(CPUState
*env
, int flush_global
)
1806 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1810 int tlb_set_page(CPUState
*env
, target_ulong vaddr
,
1811 target_phys_addr_t paddr
, int prot
,
1812 int is_user
, int is_softmmu
)
1817 /* dump memory mappings */
1818 void page_dump(FILE *f
)
1820 unsigned long start
, end
;
1821 int i
, j
, prot
, prot1
;
1824 fprintf(f
, "%-8s %-8s %-8s %s\n",
1825 "start", "end", "size", "prot");
1829 for(i
= 0; i
<= L1_SIZE
; i
++) {
1834 for(j
= 0;j
< L2_SIZE
; j
++) {
1839 if (prot1
!= prot
) {
1840 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1842 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1843 start
, end
, end
- start
,
1844 prot
& PAGE_READ
? 'r' : '-',
1845 prot
& PAGE_WRITE
? 'w' : '-',
1846 prot
& PAGE_EXEC
? 'x' : '-');
1860 int page_get_flags(unsigned long address
)
1864 p
= page_find(address
>> TARGET_PAGE_BITS
);
1870 /* modify the flags of a page and invalidate the code if
1871 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1872 depending on PAGE_WRITE */
1873 void page_set_flags(unsigned long start
, unsigned long end
, int flags
)
1878 start
= start
& TARGET_PAGE_MASK
;
1879 end
= TARGET_PAGE_ALIGN(end
);
1880 if (flags
& PAGE_WRITE
)
1881 flags
|= PAGE_WRITE_ORG
;
1882 spin_lock(&tb_lock
);
1883 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1884 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1885 /* if the write protection is set, then we invalidate the code
1887 if (!(p
->flags
& PAGE_WRITE
) &&
1888 (flags
& PAGE_WRITE
) &&
1890 tb_invalidate_phys_page(addr
, 0, NULL
);
1894 spin_unlock(&tb_lock
);
1897 /* called from signal handler: invalidate the code and unprotect the
1898 page. Return TRUE if the fault was succesfully handled. */
1899 int page_unprotect(unsigned long address
, unsigned long pc
, void *puc
)
1901 unsigned int page_index
, prot
, pindex
;
1903 unsigned long host_start
, host_end
, addr
;
1905 host_start
= address
& qemu_host_page_mask
;
1906 page_index
= host_start
>> TARGET_PAGE_BITS
;
1907 p1
= page_find(page_index
);
1910 host_end
= host_start
+ qemu_host_page_size
;
1913 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1917 /* if the page was really writable, then we change its
1918 protection back to writable */
1919 if (prot
& PAGE_WRITE_ORG
) {
1920 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1921 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1922 mprotect((void *)host_start
, qemu_host_page_size
,
1923 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1924 p1
[pindex
].flags
|= PAGE_WRITE
;
1925 /* and since the content will be modified, we must invalidate
1926 the corresponding translated code. */
1927 tb_invalidate_phys_page(address
, pc
, puc
);
1928 #ifdef DEBUG_TB_CHECK
1929 tb_invalidate_check(address
);
1937 /* call this function when system calls directly modify a memory area */
1938 void page_unprotect_range(uint8_t *data
, unsigned long data_size
)
1940 unsigned long start
, end
, addr
;
1942 start
= (unsigned long)data
;
1943 end
= start
+ data_size
;
1944 start
&= TARGET_PAGE_MASK
;
1945 end
= TARGET_PAGE_ALIGN(end
);
1946 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1947 page_unprotect(addr
, 0, NULL
);
1951 static inline void tlb_set_dirty(unsigned long addr
, target_ulong vaddr
)
1954 #endif /* defined(CONFIG_USER_ONLY) */
1956 /* register physical memory. 'size' must be a multiple of the target
1957 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1959 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1961 unsigned long phys_offset
)
1963 target_phys_addr_t addr
, end_addr
;
1966 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1967 end_addr
= start_addr
+ size
;
1968 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1969 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1970 p
->phys_offset
= phys_offset
;
1971 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
)
1972 phys_offset
+= TARGET_PAGE_SIZE
;
1976 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
1981 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1985 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1986 unassigned_mem_readb
,
1987 unassigned_mem_readb
,
1988 unassigned_mem_readb
,
1991 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1992 unassigned_mem_writeb
,
1993 unassigned_mem_writeb
,
1994 unassigned_mem_writeb
,
1997 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1999 unsigned long ram_addr
;
2001 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2002 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2003 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2004 #if !defined(CONFIG_USER_ONLY)
2005 tb_invalidate_phys_page_fast(ram_addr
, 1);
2006 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2009 stb_p((uint8_t *)(long)addr
, val
);
2010 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2011 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2012 /* we remove the notdirty callback only if the code has been
2014 if (dirty_flags
== 0xff)
2015 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2018 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2020 unsigned long ram_addr
;
2022 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2023 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2024 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2025 #if !defined(CONFIG_USER_ONLY)
2026 tb_invalidate_phys_page_fast(ram_addr
, 2);
2027 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2030 stw_p((uint8_t *)(long)addr
, val
);
2031 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2032 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2033 /* we remove the notdirty callback only if the code has been
2035 if (dirty_flags
== 0xff)
2036 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2039 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2041 unsigned long ram_addr
;
2043 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2044 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2045 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2046 #if !defined(CONFIG_USER_ONLY)
2047 tb_invalidate_phys_page_fast(ram_addr
, 4);
2048 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2051 stl_p((uint8_t *)(long)addr
, val
);
2052 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2053 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2054 /* we remove the notdirty callback only if the code has been
2056 if (dirty_flags
== 0xff)
2057 tlb_set_dirty(addr
, cpu_single_env
->mem_write_vaddr
);
2060 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2061 NULL
, /* never used */
2062 NULL
, /* never used */
2063 NULL
, /* never used */
2066 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2067 notdirty_mem_writeb
,
2068 notdirty_mem_writew
,
2069 notdirty_mem_writel
,
2072 static void io_mem_init(void)
2074 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2075 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2076 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2079 /* alloc dirty bits array */
2080 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2081 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2084 /* mem_read and mem_write are arrays of functions containing the
2085 function to access byte (index 0), word (index 1) and dword (index
2086 2). All functions must be supplied. If io_index is non zero, the
2087 corresponding io zone is modified. If it is zero, a new io zone is
2088 allocated. The return value can be used with
2089 cpu_register_physical_memory(). (-1) is returned if error. */
2090 int cpu_register_io_memory(int io_index
,
2091 CPUReadMemoryFunc
**mem_read
,
2092 CPUWriteMemoryFunc
**mem_write
,
2097 if (io_index
<= 0) {
2098 if (io_index
>= IO_MEM_NB_ENTRIES
)
2100 io_index
= io_mem_nb
++;
2102 if (io_index
>= IO_MEM_NB_ENTRIES
)
2106 for(i
= 0;i
< 3; i
++) {
2107 io_mem_read
[io_index
][i
] = mem_read
[i
];
2108 io_mem_write
[io_index
][i
] = mem_write
[i
];
2110 io_mem_opaque
[io_index
] = opaque
;
2111 return io_index
<< IO_MEM_SHIFT
;
2114 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2116 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2119 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2121 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2124 /* physical memory access (slow version, mainly for debug) */
2125 #if defined(CONFIG_USER_ONLY)
2126 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2127 int len
, int is_write
)
2133 page
= addr
& TARGET_PAGE_MASK
;
2134 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2137 flags
= page_get_flags(page
);
2138 if (!(flags
& PAGE_VALID
))
2141 if (!(flags
& PAGE_WRITE
))
2143 memcpy((uint8_t *)addr
, buf
, len
);
2145 if (!(flags
& PAGE_READ
))
2147 memcpy(buf
, (uint8_t *)addr
, len
);
2156 uint32_t ldl_phys(target_phys_addr_t addr
)
2161 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2165 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2170 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2171 int len
, int is_write
)
2176 target_phys_addr_t page
;
2181 page
= addr
& TARGET_PAGE_MASK
;
2182 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2185 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2187 pd
= IO_MEM_UNASSIGNED
;
2189 pd
= p
->phys_offset
;
2193 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2194 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2195 if (l
>= 4 && ((addr
& 3) == 0)) {
2196 /* 32 bit write access */
2198 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2200 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2201 /* 16 bit write access */
2203 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2206 /* 8 bit write access */
2208 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2212 unsigned long addr1
;
2213 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2215 ptr
= phys_ram_base
+ addr1
;
2216 memcpy(ptr
, buf
, l
);
2217 if (!cpu_physical_memory_is_dirty(addr1
)) {
2218 /* invalidate code */
2219 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2221 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2222 (0xff & ~CODE_DIRTY_FLAG
);
2226 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
2228 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2229 if (l
>= 4 && ((addr
& 3) == 0)) {
2230 /* 32 bit read access */
2231 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2234 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2235 /* 16 bit read access */
2236 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2240 /* 8 bit read access */
2241 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2247 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2248 (addr
& ~TARGET_PAGE_MASK
);
2249 memcpy(buf
, ptr
, l
);
2258 /* warning: addr must be aligned */
2259 uint32_t ldl_phys(target_phys_addr_t addr
)
2267 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2269 pd
= IO_MEM_UNASSIGNED
;
2271 pd
= p
->phys_offset
;
2274 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
2276 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2277 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2280 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2281 (addr
& ~TARGET_PAGE_MASK
);
2287 /* warning: addr must be aligned. The ram page is not masked as dirty
2288 and the code inside is not invalidated. It is useful if the dirty
2289 bits are used to track modified PTEs */
2290 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2297 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2299 pd
= IO_MEM_UNASSIGNED
;
2301 pd
= p
->phys_offset
;
2304 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2305 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2306 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2308 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2309 (addr
& ~TARGET_PAGE_MASK
);
2314 /* warning: addr must be aligned */
2315 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2322 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2324 pd
= IO_MEM_UNASSIGNED
;
2326 pd
= p
->phys_offset
;
2329 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2330 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2331 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2333 unsigned long addr1
;
2334 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2336 ptr
= phys_ram_base
+ addr1
;
2338 if (!cpu_physical_memory_is_dirty(addr1
)) {
2339 /* invalidate code */
2340 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2342 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2343 (0xff & ~CODE_DIRTY_FLAG
);
2350 /* virtual memory access for debug */
2351 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
2352 uint8_t *buf
, int len
, int is_write
)
2355 target_ulong page
, phys_addr
;
2358 page
= addr
& TARGET_PAGE_MASK
;
2359 phys_addr
= cpu_get_phys_page_debug(env
, page
);
2360 /* if no physical page mapped, return an error */
2361 if (phys_addr
== -1)
2363 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2366 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
2375 void dump_exec_info(FILE *f
,
2376 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2378 int i
, target_code_size
, max_target_code_size
;
2379 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2380 TranslationBlock
*tb
;
2382 target_code_size
= 0;
2383 max_target_code_size
= 0;
2385 direct_jmp_count
= 0;
2386 direct_jmp2_count
= 0;
2387 for(i
= 0; i
< nb_tbs
; i
++) {
2389 target_code_size
+= tb
->size
;
2390 if (tb
->size
> max_target_code_size
)
2391 max_target_code_size
= tb
->size
;
2392 if (tb
->page_addr
[1] != -1)
2394 if (tb
->tb_next_offset
[0] != 0xffff) {
2396 if (tb
->tb_next_offset
[1] != 0xffff) {
2397 direct_jmp2_count
++;
2401 /* XXX: avoid using doubles ? */
2402 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2403 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2404 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2405 max_target_code_size
);
2406 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2407 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2408 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2409 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2411 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2412 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2414 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2416 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2417 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2418 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2419 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
2422 #if !defined(CONFIG_USER_ONLY)
2424 #define MMUSUFFIX _cmmu
2425 #define GETPC() NULL
2426 #define env cpu_single_env
2427 #define SOFTMMU_CODE_ACCESS
2430 #include "softmmu_template.h"
2433 #include "softmmu_template.h"
2436 #include "softmmu_template.h"
2439 #include "softmmu_template.h"