 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

/* Note: the VirtPage handling is obsolete and will be suppressed
   ASAP */
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
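/* Worked example (illustrative, not from the original source): with the
   common values TARGET_PAGE_BITS = 12 and L2_BITS = 10 on a 32-bit
   target, L1_BITS = 32 - 10 - 12 = 10, so an address splits as
   10 L1 bits | 10 L2 bits | 12 page-offset bits, giving
   L1_SIZE = L2_SIZE = 1024 entries per level. */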
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
#if TARGET_LONG_BITS > 32
#define VIRT_L_BITS 9
#define VIRT_L_SIZE (1 << VIRT_L_BITS)
static void *l1_virt_map[VIRT_L_SIZE];
#else
static VirtPageDesc *l1_virt_map[L1_SIZE];
#endif
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
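/* Lookup sketch (illustrative values, not part of the original source):
   the high bits of a page index select the first-level slot, the low
   L2_BITS select the PageDesc inside that slot's chunk. */
#if 0
{
    unsigned int index = 0x12345;                   /* hypothetical page index */
    PageDesc *chunk = l1_map[index >> L2_BITS];     /* 0x12345 >> 10 == 0x48 */
    PageDesc *pd = chunk + (index & (L2_SIZE - 1)); /* low bits: 0x345 */
}
#endif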
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);

static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
{
#if TARGET_LONG_BITS > 32
    void **p, **lp;

    p = l1_virt_map;
    lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
        *lp = p;
    }
    lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
    p = *lp;
    if (!p) {
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
        *lp = p;
    }
    return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
#else
    VirtPageDesc *p, **lp;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
#endif
}

static inline VirtPageDesc *virt_page_find(target_ulong index)
{
    return virt_page_find_alloc(index, 0);
}
#if TARGET_LONG_BITS > 32
static void virt_page_flush_internal(void **p, int level)
{
    int i;
    if (level == 0) {
        VirtPageDesc *q = (VirtPageDesc *)p;
        for(i = 0; i < VIRT_L_SIZE; i++)
            q[i].valid_tag = 0;
    } else {
        level--;
        for(i = 0; i < VIRT_L_SIZE; i++) {
            if (p[i])
                virt_page_flush_internal(p[i], level);
        }
    }
}
#endif

static void virt_page_flush(void)
{
    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
#if TARGET_LONG_BITS > 32
        virt_page_flush_internal(l1_virt_map, 5);
#else
        {
            int i, j;
            VirtPageDesc *p;
            for(i = 0; i < L1_SIZE; i++) {
                p = l1_virt_map[i];
                if (p) {
                    for(j = 0; j < L2_SIZE; j++)
                        p[j].valid_tag = 0;
                }
            }
        }
#endif
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
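/* The jump lists above keep a 2-bit tag in the low bits of each stored
   TranslationBlock pointer: tag 0 or 1 names which jmp_next[] slot of
   the tagged TB continues the list, and tag 2 marks the list owner
   (end of list). Decoding sketch (illustrative, not part of the
   original source): */
#if 0
{
    TranslationBlock *ptr = tb->jmp_first;
    int n1 = (long)ptr & 3;                              /* tag: 0, 1 or 2 */
    TranslationBlock *tb1 = (TranslationBlock *)((long)ptr & ~3);
    /* if n1 == 2, tb1 is the owning TB itself and the walk stops;
       otherwise the walk continues at tb1->jmp_next[n1] */
}
#endif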
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
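/* Worked example (illustrative): set_bits(tab, 10, 5) marks bits 10..14.
   tab is advanced by start >> 3 = 1 byte; start and end = 15 share that
   byte, so mask = (0xff << 2) & ~(0xff << 7) = 0x7c, i.e. bits 2..6 of
   tab[1], which are absolute bits 10..14. */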
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc((target_ulong)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((target_ulong)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
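/* Bitmap check sketch (illustrative): for a 2-byte write at page offset
   0x104, offset >> 3 = 0x20 picks the bitmap byte, (offset & 7) = 4
   shifts it down, and b & ((1 << 2) - 1) is non-zero exactly when offset
   0x104 or 0x105 lies inside translated code. */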
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
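/* Usage sketch (illustrative, not from the original source): given a
   host PC that faulted inside generated code (e.g. from a signal
   handler), recover the TB containing it; the binary search works
   because TBs are allocated in tc_ptr order within code_gen_buffer. */
#if 0
{
    unsigned long host_pc = 0; /* hypothetical faulting host PC */
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* tb->pc is the target PC this block was translated from */
    }
}
#endif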
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
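/* Usage sketch (illustrative): turn a '-d'-style option string into a
   mask and enable logging with it. */
#if 0
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask)
        cpu_set_log(mask);
}
#endif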
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    env = cpu_single_env;
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        for(i = 0; i < len; i++)
            kqemu_set_notdirty(env, (unsigned long)i << TARGET_PAGE_BITS);
    }
#endif
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
, target_phys_addr_t addr
)
1979 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
1983 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
1984 unassigned_mem_readb
,
1985 unassigned_mem_readb
,
1986 unassigned_mem_readb
,
1989 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
1990 unassigned_mem_writeb
,
1991 unassigned_mem_writeb
,
1992 unassigned_mem_writeb
,
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    /* we set the page as dirty only if the code has been flushed */
    if (dirty_flags & CODE_DIRTY_FLAG)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    /* we set the page as dirty only if the code has been flushed */
    if (dirty_flags & CODE_DIRTY_FLAG)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    /* we set the page as dirty only if the code has been flushed */
    if (dirty_flags & CODE_DIRTY_FLAG)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}
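/* The three handlers above implement the self-modifying-code write path:
   a write to a page that still holds translated code goes through
   IO_MEM_NOTDIRTY, invalidates the affected TBs, performs the store, and
   only then flips the TLB entry back to plain RAM via tlb_set_dirty(). */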
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new slot: the bound must be checked against
           io_mem_nb, the next free slot, not the caller's index */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
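/* Registration sketch (illustrative; the handler names are hypothetical):
   a device supplies one read and one write callback per access size and
   gets back a phys_offset token for cpu_register_physical_memory(). */
#if 0
{
    static CPUReadMemoryFunc *my_read[3] = {
        my_readb, my_readw, my_readl,       /* hypothetical callbacks */
    };
    static CPUWriteMemoryFunc *my_write[3] = {
        my_writeb, my_writew, my_writel,    /* hypothetical callbacks */
    };
    int io = cpu_register_io_memory(0, my_read, my_write, NULL);
    cpu_register_physical_memory(0xfc000000, 0x1000, io);
}
#endif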
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
        }
    }
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif