/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#if !defined(CONFIG_SOFTMMU)
/* ... */
#endif

//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
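
/* Illustrative example (assuming a 4 KB host page and
   TARGET_PAGE_SIZE == 4096; these values are not spelled out above):
   page_init() then leaves host_page_size == 4096, host_page_bits == 12
   and host_page_mask == 0xfffff000, so 'addr & host_page_mask' rounds
   an address down to the start of its host page. */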
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
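
/* Illustrative example (assuming the usual TARGET_PAGE_BITS == 12 and
   L2_BITS == 10; neither value is shown above): the page index is split
   into an L1 slot and an L2 slot.  For target address 0x40123456:
       index   = 0x40123456 >> TARGET_PAGE_BITS = 0x40123
       L1 slot = 0x40123 >> L2_BITS             = 0x100
       L2 slot = 0x40123 & (L2_SIZE - 1)        = 0x123
   page_find() reads l1_map[0x100] and, if that L2 block exists, returns
   the PageDesc at offset 0x123 inside it. */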
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    /* if the valid tag wrapped around, clear the whole table */
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif /* DEBUG_TB_CHECK */
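
/* Note on the pointer tagging used by the TB lists above and below
   (a summary of the idiom, not original text): the low 2 bits of a
   TranslationBlock pointer stored in page_next[]/jmp_next[]/jmp_first
   encode which slot of the pointed-to TB the link goes through:
       n == 0 or 1 : slot n of that TB
       n == 2      : end-of-list marker pointing back at the list head
   hence the recurring pair
       n1  = (long)tb1 & 3;                           extract the tag
       tb1 = (TranslationBlock *)((long)tb1 & ~3);    recover the pointer
   which is valid because TranslationBlock structures are at least
   4-byte aligned. */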
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
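
/* Usage sketch (illustrative, not part of the original file): mark
   bytes 6..17 of a page as containing code. */
#if 0
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];
    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 6, 12); /* sets bits 6..17: bitmap[0] == 0xc0,
                                bitmap[1] == 0xff, bitmap[2] == 0x03 */
}
#endif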
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'vaddr' is a virtual address referencing
   the physical page of code. It is only used as a hint if there is no
   code left. */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                          target_ulong vaddr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, vaddr);
    }
}
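
/* Worked example (illustrative): for a 4-byte write at page offset
   0x123, the bitmap test above reads byte 0x24 (0x123 >> 3), shifts it
   right by 3 (0x123 & 7) and tests the low 4 bits ((1 << len) - 1).
   Only if one of the 4 written byte positions is marked as code does
   the slow tb_invalidate_phys_page_range() path run. */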
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;

        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
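
/* Typical call sequence (illustrative sketch; the translation loop that
   does this lives outside this file, and pc/phys_pc/phys_page2 are
   assumed to have been computed during translation): */
#if 0
TranslationBlock *tb;

tb = tb_alloc(pc);
if (!tb) {
    /* the buffer is full: flush everything and retry */
    tb_flush(env);
    tb = tb_alloc(pc);
}
/* ... generate host code into tb->tc_ptr ... */
tb_link_phys(tb, phys_pc, phys_page2); /* phys_page2 == -1 if one page */
tb_link(tb);
#endif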
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif /* !CONFIG_USER_ONLY */

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
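
/* Note (derived from the code above, not original text): the binary
   search is valid because TBs are carved out of code_gen_buffer in
   allocation order, so tbs[0..nb_tbs-1] have monotonically increasing
   tc_ptr values.  A caller that trapped at a host code address
   host_pc (hypothetical name) can map it back to its TB with:
       TranslationBlock *tb = tb_find_pc(host_pc); */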
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
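
/* Usage sketch (illustrative, hypothetical option string): parse a
   '-d'-style comma separated list and enable logging. */
#if 0
{
    int mask = cpu_str_to_log_mask("in_asm,op,exec");
    if (mask == 0)
        fprintf(stderr, "invalid log level\n");
    else
        cpu_set_log(mask); /* opens /tmp/qemu.log on first use */
}
#endif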
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
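
/* Worked example (illustrative, assuming TARGET_PAGE_BITS == 12 and
   CPU_TLB_SIZE == 256; neither constant appears above): for
   addr == 0x0804a123,
       addr &= TARGET_PAGE_MASK  ->  0x0804a000
       i = (addr >> 12) & 255    ->  0x4a
   so the read and write entries of slot 0x4a are flushed for both
   privilege levels; the TLB is direct-mapped on the virtual page
   number. */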
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    target_ulong length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
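
/* Summary of the write-TLB tags used in this file (a recap of the
   handlers defined above and below, not original text):
       IO_MEM_RAM      - plain RAM; writes take the fast path
       IO_MEM_NOTDIRTY - RAM whose dirty bit is clear; the first write
                         goes through notdirty_mem_write*, which marks
                         the page dirty and retags the entry IO_MEM_RAM
       IO_MEM_CODE     - RAM containing translated code; writes go
                         through code_mem_write*, which invalidates the
                         overlapping TBs before storing
       IO_MEM_ROM      - writes are ignored
   tlb_set_page() below picks the tag when an entry is installed. */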
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    TranslationBlock *first_tb;
    unsigned long pd;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                        first_tb ||
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
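
/* How the pieces fit together in user mode (illustrative flow; the
   host signal handler itself lives outside this file):
   1. tb_alloc_page() mprotect()s away PAGE_WRITE on pages holding
      translated code;
   2. a guest store to such a page raises SIGSEGV on the host;
   3. the handler calls page_unprotect(address), which restores
      PAGE_WRITE, invalidates the TBs on the page and returns 1;
   4. the faulting store is restarted and now succeeds. */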
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
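
/* Usage sketch (illustrative, hypothetical sizes and offsets): map
   32 MB of RAM at guest physical address 0, then a ROM page whose
   backing storage starts at offset 0x02000000 in phys_ram_base. */
#if 0
cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024, 0);
cpu_register_physical_memory(0x02000000, TARGET_PAGE_SIZE,
                             0x02000000 | IO_MEM_ROM);
#endif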
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* allocating a new zone: bound the allocation counter, not
           the (non-positive) index that was passed in */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
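
/* Usage sketch (illustrative, hypothetical device handlers): allocate
   a new I/O zone and map it at a guest physical address. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,    /* hypothetical */
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel, /* hypothetical */
};

int io;
io = cpu_register_io_memory(0, mydev_read, mydev_write);
cpu_register_physical_memory(0xf0000000, 0x1000, io);
#endif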
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the chunk that fits in the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_ulong addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_ulong page, pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val, 0);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val, 0);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val, 0);
                    l = 1;
                }
            } else {
                unsigned long addr1;

                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif