/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
//#define DEBUG_TB_INVALIDATE

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
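
/* translated blocks are looked up through two hash tables: tb_hash is
   indexed by target virtual PC (used by the CPU loop to find the next
   TB to execute), while tb_phys_hash is indexed by physical PC so that
   self-modifying code can invalidate TBs whatever virtual mapping they
   were translated under */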
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

uint8_t *phys_ram_base;
typedef struct PageDesc {
    /* offset in memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
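
/* page descriptors are kept in a two level table: the top L1_BITS of a
   page index select an entry of l1_map, which points to an array of
   L2_SIZE descriptors selected by the low L2_BITS; the second level
   arrays are only allocated when a page in their range is first used */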
static void io_mem_init(void);
unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif
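
/* a VirtPageDesc is only considered valid while its valid_tag equals
   the global virt_valid_tag, so incrementing the tag invalidates every
   entry at once without walking the table */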
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code(CPUState *env, uint32_t addr);
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}
static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    /* if the tag wrapped around, every entry must be cleared
       explicitly before the tag can be reused */
    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif
void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;

#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;

    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
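
/* the page_next and jmp_next lists store a small tag in the two low
   bits of each pointer: for page lists it is the index (0 or 1) of the
   physical page inside the TB, for jump lists it is the jump slot, and
   the value 2 marks the list head (jmp_first). '& 3' recovers the tag
   and '& ~3' the real pointer, which is safe because TranslationBlock
   structures are suitably aligned */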
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb)
            break;
        ptb = &tb1->hash_next;
    }
    *ptb = tb1->hash_next;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
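
/* the code bitmap records, one bit per target byte, which bytes of the
   page are covered by translated code; it is only built once a page has
   accumulated SMC_BITMAP_USE_THRESHOLD write lookups, after which writes
   that miss all translated code can be filtered out cheaply (see
   tb_invalidate_phys_page_fast below) */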
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page */
static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong tb_start, tb_end;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }

#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        tlb_unprotect_code_phys(cpu_single_env, start);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            tb_invalidate_phys_page_range(start, start + len);
    } else {
        tb_invalidate_phys_page_range(start, start + len);
    }
}
/* invalidate all TBs which intersect with the target virtual page
   starting in range [start;end[. This function is usually used when
   the target processor flushes its I-cache. NOTE: start and end must
   refer to the same physical page */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb, *tb_next;
    target_ulong pc;
    target_ulong phys_start;

#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;

        vp = virt_page_find(start >> TARGET_PAGE_BITS);
        if (!vp)
            return;
        if (vp->valid_tag != virt_valid_tag)
            return;
        phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
    }
#else
    phys_start = start;
#endif
    p = page_find(phys_start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        pc = tb->pc;
        if (!((pc + tb->size) <= start || pc >= end)) {
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb)
        tlb_unprotect_code(cpu_single_env, start);
#endif
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr)
{
    int n;
    PageDesc *p;
    TranslationBlock *tb;

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        vp->valid_tag = virt_valid_tag;

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            vp->valid_tag = virt_valid_tag;
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
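
/* the binary search is valid because TBs are allocated in order from
   the 'tbs' array and their code is generated sequentially into
   code_gen_buffer, so tc_ptr grows monotonically with the TB index */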
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page_range(pc, pc + 1);
    return 0;
#else
    return -1;
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
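
/* chained TBs branch directly to each other without going through the
   main loop, so a pending interrupt request would never be examined;
   breaking the jump chains around the currently executing TB forces a
   return to the CPU loop where interrupt_request is tested */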
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}
#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}
void tlb_flush_page(CPUState *env, uint32_t addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address |= IO_MEM_CODE;
        tlb_entry->addend -= (unsigned long)phys_ram_base;
    }
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
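
/* write protection is encoded directly in the TLB entry rather than in
   a separate flag: ORing IO_MEM_CODE into the address makes the inline
   TLB comparison fail, routing the write to the code_mem_write handlers,
   and rebasing the addend by phys_ram_base lets those handlers recover
   the physical offset from the virtual address */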
static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in virtual page 'addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code(CPUState *env, uint32_t addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code1(&env->tlb_write[0][i], addr);
    tlb_unprotect_code1(&env->tlb_write[1][i], addr);
}
static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       uint32_t phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address &= TARGET_PAGE_MASK;
        tlb_entry->addend += (unsigned long)phys_ram_base;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
/* XXX: find a way to improve it */
static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}
/* add a new TLB entry. At most one entry for a given virtual
   address is permitted. */
int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address, addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PROT_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PROT_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (prot & PROT_WRITE) {
                if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
                    /* ROM: we do as if code was inside */
                    /* if code is present, we only map as read only and save the
                       original mapping */
                    VirtPageDesc *vp;

                    vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                    vp->phys_addr = pd;
                    vp->prot = prot;
                    vp->valid_tag = virt_valid_tag;
                    prot &= ~PAGE_WRITE;
                }
            }
            map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                            MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
            if (map_addr == MAP_FAILED) {
                cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                          paddr, vaddr);
            }
        }
    }
#endif
    return ret;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    tb_invalidate_phys_page(vp->phys_addr);
    mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
    return 1;
#else
    return 0;
#endif
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
}

void tlb_flush_page_write(CPUState *env, uint32_t addr)
{
}

int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}
/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
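
/* two granularities are at play here: mprotect() operates on whole host
   pages while flags are tracked per target page, so the loop above ORs
   the flags of every target page contained in the faulting host page
   before restoring write access */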
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

#endif /* defined(CONFIG_USER_ONLY) */
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
/* self modifying code support in soft mmu mode : writing to a page
   containing code comes to these functions */

static void code_mem_writeb(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 1);
#endif
    stb_raw(phys_ram_base + addr, val);
}

static void code_mem_writew(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 2);
#endif
    stw_raw(phys_ram_base + addr, val);
}

static void code_mem_writel(uint32_t addr, uint32_t val)
{
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(addr, 4);
#endif
    stl_raw(phys_ram_base + addr, val);
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
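
/* illustrative usage (not part of this file): a device with hypothetical
   handler tables my_read[3]/my_write[3] would allocate an io zone and
   map it at a physical address as follows:

       int io;
       io = cpu_register_io_memory(0, my_read, my_write);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
*/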
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_ulong page, pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(ptr, buf, l);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env,
                        uint8_t *buf, target_ulong addr, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(env, buf,
                               phys_addr + (addr & ~TARGET_PAGE_MASK), l,
                               is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

/* instantiate the soft MMU load/store template once per access size */
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif