/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
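/* A 32-bit target address therefore splits into
   [ L1 index (L1_BITS) | L2 index (L2_BITS) | page offset (TARGET_PAGE_BITS) ],
   e.g. with 4 KB pages: 10 bits of L1 index, 10 bits of L2 index and a
   12-bit offset, so each lazily allocated L2 table covers 4 MB of the
   target address space (values assumed; they depend on the target). */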
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
FILE *logfile;
int loglevel;
char *logfilename = "/tmp/qemu.log";
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
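/* e.g. with a 4 KB host page: qemu_host_page_bits = 12 and
   qemu_host_page_mask = 0xfffff000. */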
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
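/* Bumping 'virt_valid_tag' lazily invalidates every VirtPageDesc at
   once: entries only count as valid while their tag matches, and the
   table is swept explicitly only when the counter wraps to 0. */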
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
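/* The low 2 bits of the pointers stored in 'jmp_first'/'jmp_next'
   encode which jump slot (0 or 1) of the pointing TB is in use; the
   value 2 marks the list head, i.e. the owning TB itself. Pointers
   must be masked with ~3 before being dereferenced. */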
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    TranslationBlock *tb1, *tb2, **ptb;
    unsigned int h, n1;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
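/* e.g. set_bits(tab, 4, 6) marks bit offsets 4..9: the first byte is
   OR-ed with mask 0xf0 (bits 4-7) and the second with 0x03 (bits 8-9). */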
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */
                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (loglevel) {
        fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                cpu_single_env->mem_write_vaddr, len,
                cpu_single_env->eip,
                cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
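/* e.g. a 4-byte write at page offset 0x124: b = code_bitmap[0x24] >> 4,
   and (b & 0xf) is non-zero iff one of bytes 0x124..0x127 of the page
   belongs to a translated block. */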
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */
            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
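/* Two complementary SMC defenses are used here: in user mode the host
   page is write-protected, so a guest write faults and is handled by
   page_unprotect(); in softmmu mode tlb_protect_code() redirects the
   TLB write entry to IO_MEM_CODE so writes are trapped in software. */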
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif /* !CONFIG_USER_ONLY */

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
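/* A 'tb_next_offset[n]' of 0xffff means jump slot n is unused;
   otherwise the slot is initialized to jump back into the TB's own
   code (i.e. "not chained"), and tb_set_jmp_target() patches it later
   when the block is chained to a successor. */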
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
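/* The search works because TBs are carved sequentially out of
   code_gen_buffer, so tbs[] is ordered by tc_ptr; when the loop falls
   through, tbs[m_max] is the last block starting at or before tc_ptr. */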
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC) || defined(TARGET_SPARC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);
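    /* The TLB is direct-mapped on the low bits of the virtual page
       number: with 4 KB pages and a 256-entry TLB (typical values
       here), vaddr 0x0803f000 maps to index 0x3f. */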
    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}
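/* Redirecting the write entry to IO_MEM_CODE keeps reads at full speed
   while forcing every write to the page through code_mem_write*(),
   which can invalidate the overlapping TBs before doing the store. */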
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least read only */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
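    /* phys_ram_dirty holds one byte per target page, indexed by
       physical page number, so clearing (length >> TARGET_PAGE_BITS)
       bytes starting at (start >> TARGET_PAGE_BITS) covers exactly
       the reset range. */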
    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }
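        /* For RAM, 'address' is just the virtual page address; for I/O
           and special pages the io_index (IO_MEM_CODE, IO_MEM_NOTDIRTY,
           ...) is OR-ed into its low bits, so the fast-path tag compare
           fails and the access is routed to the registered handler. */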
        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
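/* Note the ordering: overlapping TBs are invalidated before the store
   is performed, so stale translations are already gone by the time
   execution resumes and the modified bytes will be retranslated. */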
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
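/* Usage sketch -- a hypothetical device, not part of this file: supply
   byte/word/dword handlers, then use the returned value as the
   phys_offset of a physical page range:

       static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr)
       {
           return 0xff;
       }
       static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
       {
       }
       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readb, mydev_readb,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writeb, mydev_writeb,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
       cpu_register_physical_memory(0xd0000000, 0x1000, io);

   The io_index encoded in the low bits of phys_offset is what routes
   later accesses back to these handlers. */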
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
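/* softmmu_template.h is instantiated once per access size (SHIFT 0..3
   selects the 8/16/32/64-bit accessors); with MMUSUFFIX set to _cmmu
   these instantiations produce the variants used when fetching code. */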