/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <sys/types.h>

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
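/* A worked example of the two-level lookup above (editorial note): with
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, a 32-bit target address
   0x12345678 has page index 0x12345678 >> 12 = 0x12345. The top L1_BITS
   (0x12345 >> 10 = 0x48) select the second-level table in l1_map, and the
   low 10 bits (0x12345 & 0x3ff = 0x345) select the PageDesc inside it.
   Second-level tables are allocated lazily, so sparse guest address
   spaces stay cheap. */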
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
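/* Note: l1_phys_map mirrors l1_map but is keyed by physical page number
   and stores PhysPageDesc entries. When TARGET_PHYS_ADDR_SPACE_BITS
   exceeds 32, phys_page_find_alloc() inserts one extra indirection level
   so that up to (32 + L1_BITS) address bits can be resolved; anything
   beyond that is rejected at compile time by the #error check above. */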
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
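/* How the jump lists above are encoded: each TB can chain to at most two
   successors (jmp_next[0] and jmp_next[1]), and every TB that jumps *into*
   a given TB is kept in a circular singly-linked list threaded through
   jmp_first/jmp_next. TranslationBlock pointers are at least 4-byte
   aligned, so the low 2 bits of each list pointer are free: they store
   which of the two jump slots (0 or 1) the link came from, and the value
   2 marks the list head (the TB itself). That is why every traversal
   masks with ~3 and compares the tag against 2. */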
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
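/* Example: set_bits(tab, 3, 7) must set bits 3..9. The range crosses a
   byte boundary, so the code first ORs 0xff << 3 into tab[0] (bits 3..7),
   rounds start up to 8, skips the (here empty) run of full bytes, and
   finally ORs ~(0xff << 2) = 0x03 into tab[1] (bits 8..9). Bit k of the
   page bitmap means "byte k of this page lies inside a translated
   block". */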
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
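/* The bitmap built above lets tb_invalidate_phys_page_fast() answer
   "does this small write overlap translated code?" with one shift and
   mask instead of walking the TB list. It is only built once a page has
   taken SMC_BITMAP_USE_THRESHOLD code-triggered write faults, since
   building it costs a full walk of the page's TBs. */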
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
714 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
721 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
722 cpu_single_env
->mem_write_vaddr
, len
,
724 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
728 p
= page_find(start
>> TARGET_PAGE_BITS
);
731 if (p
->code_bitmap
) {
732 offset
= start
& ~TARGET_PAGE_MASK
;
733 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
734 if (b
& ((1 << len
) - 1))
738 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
812 static inline void tb_alloc_page(TranslationBlock
*tb
,
813 unsigned int n
, unsigned int page_addr
)
816 TranslationBlock
*last_first_tb
;
818 tb
->page_addr
[n
] = page_addr
;
819 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
820 tb
->page_next
[n
] = p
->first_tb
;
821 last_first_tb
= p
->first_tb
;
822 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
823 invalidate_page_bitmap(p
);
825 #if defined(TARGET_HAS_SMC) || 1
827 #if defined(CONFIG_USER_ONLY)
828 if (p
->flags
& PAGE_WRITE
) {
829 unsigned long host_start
, host_end
, addr
;
832 /* force the host page as non writable (writes will have a
833 page fault + mprotect overhead) */
834 host_start
= page_addr
& qemu_host_page_mask
;
835 host_end
= host_start
+ qemu_host_page_size
;
837 for(addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
)
838 prot
|= page_get_flags(addr
);
839 mprotect((void *)host_start
, qemu_host_page_size
,
840 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
841 #ifdef DEBUG_TB_INVALIDATE
842 printf("protecting code page: 0x%08lx\n",
845 p
->flags
&= ~PAGE_WRITE
;
848 /* if some code is already present, then the pages are already
849 protected. So we handle the case where only the first TB is
850 allocated in a physical page */
851 if (!last_first_tb
) {
852 tlb_protect_code(page_addr
);
856 #endif /* TARGET_HAS_SMC */
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
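/* Setting jmp_first to ((long)tb | 2) marks the incoming-jump list as
   containing only its head: tag value 2 is the list terminator, so a
   freshly linked TB has nothing chained to it yet (see the encoding note
   near tb_jmp_remove()). */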
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
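/* The binary search above relies on tbs[] being sorted by tc_ptr, which
   holds because translated code is handed out linearly from
   code_gen_buffer as blocks are created, and the whole array is reset at
   once by tb_flush(). On a miss the loop exits with m_max naming the last
   TB whose tc_ptr is <= tc_ptr, i.e. the block containing the host PC. */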
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}
1198 /* NOTE: if flush_global is true, also flush global entries (not
1200 void tlb_flush(CPUState
*env
, int flush_global
)
1204 #if defined(DEBUG_TLB)
1205 printf("tlb_flush:\n");
1207 /* must reset current TB so that interrupts cannot modify the
1208 links while we are modifying them */
1209 env
->current_tb
= NULL
;
1211 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1212 env
->tlb_read
[0][i
].address
= -1;
1213 env
->tlb_write
[0][i
].address
= -1;
1214 env
->tlb_read
[1][i
].address
= -1;
1215 env
->tlb_write
[1][i
].address
= -1;
1218 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1220 #if !defined(CONFIG_SOFTMMU)
1221 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1224 if (env
->kqemu_enabled
) {
1225 kqemu_flush(env
, flush_global
);
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
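/* Dirty tracking in one line: a RAM TLB entry tagged IO_MEM_RAM is
   written through directly; retagging it IO_MEM_NOTDIRTY forces the next
   store to take the slow I/O path (the notdirty_mem_write* handlers
   below), which is where dirty bits get set and translated code gets
   invalidated. */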
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;

    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;
            TranslationBlock *first_tb;
            PageDesc *pdesc;

            /* TBs translated from this physical page, if any
               (reconstructed: the original referenced 'first_tb' here) */
            pdesc = page_find(pd >> TARGET_PAGE_BITS);
            first_tb = pdesc ? pdesc->first_tb : NULL;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;

    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
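/* Three separate notdirty handlers exist because the write width matters
   twice: tb_invalidate_phys_page_fast() is told how many bytes to check
   against the code bitmap (1, 2 or 4), and the store itself must be
   replayed with the matching st*_p() width before the page's dirty byte
   is updated. */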
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2027 uint32_t ldl_phys(target_phys_addr_t addr
)
2035 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2037 pd
= IO_MEM_UNASSIGNED
;
2039 pd
= p
->phys_offset
;
2042 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
2044 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2045 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2048 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2049 (addr
& ~TARGET_PAGE_MASK
);
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
,
2189 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
2191 int i
, target_code_size
, max_target_code_size
;
2192 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
2193 TranslationBlock
*tb
;
2195 target_code_size
= 0;
2196 max_target_code_size
= 0;
2198 direct_jmp_count
= 0;
2199 direct_jmp2_count
= 0;
2200 for(i
= 0; i
< nb_tbs
; i
++) {
2202 target_code_size
+= tb
->size
;
2203 if (tb
->size
> max_target_code_size
)
2204 max_target_code_size
= tb
->size
;
2205 if (tb
->page_addr
[1] != -1)
2207 if (tb
->tb_next_offset
[0] != 0xffff) {
2209 if (tb
->tb_next_offset
[1] != 0xffff) {
2210 direct_jmp2_count
++;
2214 /* XXX: avoid using doubles ? */
2215 cpu_fprintf(f
, "TB count %d\n", nb_tbs
);
2216 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
2217 nb_tbs
? target_code_size
/ nb_tbs
: 0,
2218 max_target_code_size
);
2219 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2220 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
2221 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
2222 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
2224 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
2225 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2227 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
2229 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
2230 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
2231 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
2232 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
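/* The code-access softmmu helpers are generated below by including
   softmmu_template.h once per access size (SHIFT 0..3 selecting
   byte/word/long/quad), with MMUSUFFIX/_cmmu and SOFTMMU_CODE_ACCESS
   configuring the template for instruction fetches rather than data
   accesses. */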
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif