/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE
/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
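/* TBs are indexed both by target virtual PC (tb_hash) and by the
   physical address of their code (tb_phys_hash); the physical hash is
   what allows a write to a physical page to find and invalidate the
   translated code it contains. */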
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
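/* The page tables are two-level: the top L1_BITS of a page index
   select an entry in l1_map/l1_phys_map, and the low L2_BITS select
   the descriptor inside the dynamically allocated second-level
   array. */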
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    /* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
    index &= (L1_SIZE - 1);
#endif
    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
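/* Note on the jump lists: the low 2 bits of each TranslationBlock
   pointer stored in jmp_first/jmp_next encode which jump slot (0 or 1)
   of the pointing TB is used, and the value 2 marks the list head;
   hence the recurring '& 3' and '& ~3' operations. */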
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
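/* set_bits() marks the bit range [start, start + len) in 'tab': a
   partial leading byte, a run of full 0xff bytes, then a partial
   trailing byte. */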
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
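/* The code_write_count / code_bitmap pair is a heuristic for
   self-modifying code: once a page containing code has been written
   SMC_BITMAP_USE_THRESHOLD times, a bitmap of its code bytes is built
   so that later writes that do not overlap any translated code can
   skip the invalidation entirely (see tb_invalidate_phys_page_fast
   below). */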
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
            cpu_single_env->mem_write_vaddr, len,
            cpu_single_env->eip,
            cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
846 static inline void tb_alloc_page(TranslationBlock
*tb
,
847 unsigned int n
, unsigned int page_addr
)
850 TranslationBlock
*last_first_tb
;
852 tb
->page_addr
[n
] = page_addr
;
853 p
= page_find(page_addr
>> TARGET_PAGE_BITS
);
854 tb
->page_next
[n
] = p
->first_tb
;
855 last_first_tb
= p
->first_tb
;
856 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
857 invalidate_page_bitmap(p
);
859 #if defined(TARGET_HAS_SMC) || 1
861 #if defined(CONFIG_USER_ONLY)
862 if (p
->flags
& PAGE_WRITE
) {
863 unsigned long host_start
, host_end
, addr
;
866 /* force the host page as non writable (writes will have a
867 page fault + mprotect overhead) */
868 host_start
= page_addr
& qemu_host_page_mask
;
869 host_end
= host_start
+ qemu_host_page_size
;
871 for(addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
)
872 prot
|= page_get_flags(addr
);
873 mprotect((void *)host_start
, qemu_host_page_size
,
874 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
875 #ifdef DEBUG_TB_INVALIDATE
876 printf("protecting code page: 0x%08lx\n",
879 p
->flags
&= ~PAGE_WRITE
;
882 /* if some code is already present, then the pages are already
883 protected. So we handle the case where only the first TB is
884 allocated in a physical page */
885 if (!last_first_tb
) {
886 target_ulong virt_addr
;
888 virt_addr
= (tb
->pc
& TARGET_PAGE_MASK
) + (n
<< TARGET_PAGE_BITS
);
889 tlb_protect_code(cpu_single_env
, virt_addr
);
893 #endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
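/* The binary search above relies on tbs[] being ordered by tc_ptr,
   which holds because translated code is allocated linearly from
   code_gen_buffer as blocks are created. */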
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
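/* Unlinking the chained jumps with tb_reset_jump_recursive() forces
   the currently executing translated code to fall back to the main
   execution loop, where the pending interrupt_request is examined. */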
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        found:
            mask |= item->mask;
        }
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
*env
, const char *fmt
, ...)
1268 fprintf(stderr
, "qemu: fatal: ");
1269 vfprintf(stderr
, fmt
, ap
);
1270 fprintf(stderr
, "\n");
1272 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1274 cpu_dump_state(env
, stderr
, fprintf
, 0);
1280 #if !defined(CONFIG_USER_ONLY)
1282 /* NOTE: if flush_global is true, also flush global entries (not
1284 void tlb_flush(CPUState
*env
, int flush_global
)
1288 #if defined(DEBUG_TLB)
1289 printf("tlb_flush:\n");
1291 /* must reset current TB so that interrupts cannot modify the
1292 links while we are modifying them */
1293 env
->current_tb
= NULL
;
1295 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1296 env
->tlb_read
[0][i
].address
= -1;
1297 env
->tlb_write
[0][i
].address
= -1;
1298 env
->tlb_read
[1][i
].address
= -1;
1299 env
->tlb_write
[1][i
].address
= -1;
1303 memset (tb_hash
, 0, CODE_GEN_HASH_SIZE
* sizeof (void *));
1305 #if !defined(CONFIG_SOFTMMU)
1306 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
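/* Redirecting the write TLB entries of a code page to IO_MEM_CODE
   routes every store to that page through the code_mem_write handlers
   below, which invalidate the overlapping TBs before performing the
   write. */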
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
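/* Dirty RAM tracking: clean RAM pages are entered in the write TLB as
   IO_MEM_NOTDIRTY, so the first store goes through notdirty_mem_write
   below, which sets the dirty byte and flips the entry back to plain
   IO_MEM_RAM so that later stores are direct. */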
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
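/* Illustrative use (hypothetical handler arrays my_mem_read and
   my_mem_write, device state 's'):

       io = cpu_register_io_memory(0, my_mem_read, my_mem_write, s);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);

   registers new handlers and maps them over one physical page. */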
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
,
2149 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2151 int i
, target_code_size
, max_target_code_size
;
2152 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2153 TranslationBlock
*tb
;
2155 target_code_size
= 0;
2156 max_target_code_size
= 0;
2158 direct_jmp_count
= 0;
2159 direct_jmp2_count
= 0;
2160 for(i
= 0; i
< nb_tbs
; i
++) {
2162 target_code_size
+= tb
->size
;
2163 if (tb
->size
> max_target_code_size
)
2164 max_target_code_size
= tb
->size
;
2165 if (tb
->page_addr
[1] != -1)
2167 if (tb
->tb_next_offset
[0] != 0xffff) {
2169 if (tb
->tb_next_offset
[1] != 0xffff) {
2170 direct_jmp2_count
++;
2174 /* XXX: avoid using doubles ? */
2175 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2176 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2177 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2178 max_target_code_size
);
2179 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2180 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2181 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2182 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2184 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2185 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2187 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2189 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2190 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2191 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2192 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
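/* softmmu_template.h is included once per access size; the SHIFT value
   selects byte (0), word (1), longword (2) or quadword (3) accessors,
   and SOFTMMU_CODE_ACCESS with the _cmmu suffix generates the variants
   used for code fetch. */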
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif