/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
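
/* Illustrative note on the two-level lookup above: with TARGET_PAGE_BITS = 12
   and L2_BITS = 10 (values assumed here only for the example), the address
   0x08049123 has page index 0x08049 (addr >> 12); 0x08049 >> 10 = 0x20 selects
   the l1_map slot, and 0x08049 & 0x3ff = 0x49 indexes into that 1024-entry L2
   block. A hypothetical helper, not part of the original file: */
#if 0
static int example_page_has_code(target_ulong phys_addr)
{
    /* look up the PageDesc for a physical address and test whether any
       translated block intersects its page */
    PageDesc *p = page_find(phys_addr >> TARGET_PAGE_BITS);
    return p != NULL && p->first_tb != NULL;
}
#endif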
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    /* if the tag wraps around, all the cached entries must be
       explicitly invalidated */
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK
static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
#endif
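
/* Note on the list encoding used throughout this file: a TB can span two
   pages and can be the source of two chained jumps, so list links carry a
   small index next to the pointer. The index is stored in the low 2 bits of
   the pointer itself, which is safe because TranslationBlock structures are
   at least 4-byte aligned; the value 2 marks the head of a circular jump
   list (the tb->jmp_first self reference). Decoding a tagged pointer is
   therefore always the same two steps:

       n  = (long)ptr & 3;                          // 0, 1 or 2
       tb = (TranslationBlock *)((long)ptr & ~3);   // the real pointer
*/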
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
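
/* The bitmap holds one bit per byte of the page (TARGET_PAGE_SIZE / 8 bytes
   in total); a set bit means "this byte belongs to translated code". It is
   only built once a page has accumulated SMC_BITMAP_USE_THRESHOLD (10)
   write-invalidations (see tb_invalidate_phys_page_range() below), so a page
   that is written once does not pay for a full scan of its TB list on every
   store. */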
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'vaddr' is a virtual address referencing
   the physical page of code. It is only used as a hint if there is no
   code left. */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                          target_ulong vaddr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, vaddr);
    }
}
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;

        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif /* !CONFIG_USER_ONLY */

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
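
/* The binary search relies on TBs being allocated sequentially from
   code_gen_buffer, so tbs[] is implicitly sorted by tc_ptr. When the loop
   exits without an exact match, m_max indexes the last TB whose tc_ptr is
   below the query, i.e. the TB whose generated code contains the host
   address. Example: for tc_ptr values {0x100, 0x180, 0x230}, a query of
   0x1a0 returns the TB starting at 0x180. */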
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
924 int cpu_breakpoint_remove(CPUState
*env
, uint32_t pc
)
926 #if defined(TARGET_I386)
928 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
929 if (env
->breakpoints
[i
] == pc
)
934 memmove(&env
->breakpoints
[i
], &env
->breakpoints
[i
+ 1],
935 (env
->nb_breakpoints
- (i
+ 1)) * sizeof(env
->breakpoints
[0]));
936 env
->nb_breakpoints
--;
937 tb_invalidate_page_range(pc
, pc
+ 1);
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
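
/* Unlinking the chained jumps (tb_reset_jump_recursive) is what makes the
   interrupt visible: without it, generated code could jump from TB to TB
   indefinitely without ever returning to the main loop where
   interrupt_request is tested. interrupt_lock only guards against two
   concurrent unlink operations; it is a bare test-and-set flag, not a full
   mutex. */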
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
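
/* Self-modifying-code protection in softmmu mode works entirely through the
   type bits stored in the low bits of a write TLB entry's 'address' field:
   IO_MEM_CODE routes stores through code_mem_write*() (which invalidates the
   TBs on the page), IO_MEM_NOTDIRTY routes them through notdirty_mem_write*()
   (which sets the dirty bit and re-enables the fast path), and a plain RAM
   entry lets generated code store directly. tlb_protect_code() and the
   tlb_unprotect_code*() helpers just flip entries between these states. */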
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    target_ulong length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}
/* update the TLB entries corresponding to virtual page vaddr and
   physical address addr once the page has been marked dirty, so that
   further writes are no longer trapped */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    target_ulong pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                        first_tb ||
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr);
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
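
/* PAGE_WRITE_ORG preserves the protection the guest actually asked for:
   tb_alloc_page() removes PAGE_WRITE from the host mapping when code is
   translated from a page, and this handler restores it on the first faulting
   write, after throwing away the stale translations. A write to a code page
   therefore costs one SIGSEGV + mprotect() round trip instead of a check on
   every store. */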
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
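
/* Example usage (addresses and 'rom_offset' are hypothetical): RAM is
   registered with phys_offset 0 so each page records its offset inside
   phys_ram_base, while an I/O token keeps its io_index in the low bits:

       cpu_register_physical_memory(0x00000000, phys_ram_size, 0);
       cpu_register_physical_memory(0xfffc0000, 0x40000,
                                    rom_offset | IO_MEM_ROM);

   phys_offset is advanced page by page only for RAM and ROM; for I/O
   pages the same token is replicated unchanged into every PageDesc. */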
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    unsigned long phys_addr;

    phys_addr = addr - (long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}
static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
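
/* Example usage (the handler names are hypothetical): a device registers
   its three width handlers once and attaches the returned token to a
   physical range:

       int io;
       io = cpu_register_io_memory(0, my_dev_read, my_dev_write);
       cpu_register_physical_memory(0xfc000000, 0x1000, io);

   Because the token is io_index << IO_MEM_SHIFT, it fits entirely in the
   low bits that cpu_register_physical_memory() stores alongside the page
   offset, and the dispatcher in cpu_physical_memory_rw() recovers the
   index with (pd >> IO_MEM_SHIFT). */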
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_ulong page, pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val, 0);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val, 0);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val, 0);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(ptr, buf, l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env,
                        uint8_t *buf, target_ulong addr, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(env, buf,
                               phys_addr + (addr & ~TARGET_PAGE_MASK), l,
                               is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif