/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <sys/types.h>

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
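
/* Note (editorial, not in the original source): pages are looked up through
 * a two-level table: the top L1_BITS of a page index select an entry in
 * l1_map/l1_phys_map, and the low L2_BITS select the PageDesc/PhysPageDesc
 * inside a second-level array that is allocated on demand.  Illustration:
 *
 *     index = addr >> TARGET_PAGE_BITS;
 *     l1    = index >> L2_BITS;          // first-level slot
 *     l2    = index & (L2_SIZE - 1);     // slot inside the L2 array
 */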
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
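
/* Note (editorial): a subpage_t describes a target page that is shared by
 * several I/O regions.  mem_read/mem_write/opaque hold one handler table per
 * byte offset inside the page; subpage_register() fills the per-offset slots
 * and subpage_readlen()/subpage_writelen() dispatch accesses through them. */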
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
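
/* Note (editorial): qemu_host_page_size is the larger of the real host page
 * size and TARGET_PAGE_SIZE; the mprotect()/VirtualProtect() calls above are
 * required because code_gen_buffer is a static array that must be executable
 * for the generated host code to run. */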
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif

    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
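
/* Note (editorial): phys_page_find() never allocates (alloc == 0); it returns
 * NULL for physical pages that were never registered, and callers treat that
 * as IO_MEM_UNASSIGNED. */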
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
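
/* Note (editorial): tb_flush() discards every translated block at once; it is
 * the fallback used by tb_alloc()/tb_gen_code() when the static TB array or
 * the code generation buffer is exhausted. */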
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
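
/* Note (editorial) on the jump list encoding used above and below: the low
 * 2 bits of the pointers stored in jmp_next[]/jmp_first select which of the
 * two outgoing jump slots of the pointed-to TB is meant (0 or 1), and the
 * value 2 marks the end of the circular list.  Illustration:
 *
 *     n1  = (long)tb1 & 3;                          // slot index or end marker
 *     tb1 = (TranslationBlock *)((long)tb1 & ~3);   // real TB pointer
 */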
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
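
/* Note (editorial): after tb_phys_invalidate() the TB is unreachable from the
 * physical hash table, the per-page TB lists, the per-CPU tb_jmp_cache and
 * the jump chains; the generated code itself is only reclaimed by the next
 * tb_flush(). */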
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
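
/* Illustration (editorial, not part of the original source): set_bits() marks
 * the bit range [start, start + len) in a packed bitmap, e.g.
 *
 *     uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };
 *     set_bits(bitmap, 5, 10);   // bits 5..14: bitmap[0] = 0xe0, bitmap[1] = 0x7f
 */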
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
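
/* Note (editorial): the per-page code bitmap records which bytes of a RAM
 * page are covered by translated code; tb_invalidate_phys_page_fast()
 * consults it so that writes which do not touch translated code avoid the
 * full tb_invalidate_phys_page_range() scan. */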
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif
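
/* Note (editorial): tb_gen_code() is only used by the precise
 * self-modifying-code handling to regenerate a block containing just the
 * faulting instruction; phys_page2 stays -1 when the block does not cross a
 * target page boundary, so tb_link_phys() registers a single page. */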
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
643 void tb_invalidate_phys_page_range(target_ulong start
, target_ulong end
,
644 int is_cpu_write_access
)
646 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
647 CPUState
*env
= cpu_single_env
;
649 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
650 target_ulong tb_start
, tb_end
;
651 target_ulong current_pc
, current_cs_base
;
653 p
= page_find(start
>> TARGET_PAGE_BITS
);
656 if (!p
->code_bitmap
&&
657 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
658 is_cpu_write_access
) {
659 /* build code bitmap */
660 build_page_bitmap(p
);
663 /* we remove all the TBs in the range [start, end[ */
664 /* XXX: see if in some cases it could be faster to invalidate all the code */
665 current_tb_not_found
= is_cpu_write_access
;
666 current_tb_modified
= 0;
667 current_tb
= NULL
; /* avoid warning */
668 current_pc
= 0; /* avoid warning */
669 current_cs_base
= 0; /* avoid warning */
670 current_flags
= 0; /* avoid warning */
674 tb
= (TranslationBlock
*)((long)tb
& ~3);
675 tb_next
= tb
->page_next
[n
];
676 /* NOTE: this is subtle as a TB may span two physical pages */
678 /* NOTE: tb_end may be after the end of the page, but
679 it is not a problem */
680 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
681 tb_end
= tb_start
+ tb
->size
;
683 tb_start
= tb
->page_addr
[1];
684 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
686 if (!(tb_end
<= start
|| tb_start
>= end
)) {
687 #ifdef TARGET_HAS_PRECISE_SMC
688 if (current_tb_not_found
) {
689 current_tb_not_found
= 0;
691 if (env
->mem_write_pc
) {
692 /* now we have a real cpu fault */
693 current_tb
= tb_find_pc(env
->mem_write_pc
);
696 if (current_tb
== tb
&&
697 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
698 /* If we are modifying the current TB, we must stop
699 its execution. We could be more precise by checking
700 that the modification is after the current PC, but it
701 would require a specialized function to partially
702 restore the CPU state */
704 current_tb_modified
= 1;
705 cpu_restore_state(current_tb
, env
,
706 env
->mem_write_pc
, NULL
);
707 #if defined(TARGET_I386)
708 current_flags
= env
->hflags
;
709 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
710 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
711 current_pc
= current_cs_base
+ env
->eip
;
713 #error unsupported CPU
716 #endif /* TARGET_HAS_PRECISE_SMC */
717 /* we need to do that to handle the case where a signal
718 occurs while doing tb_phys_invalidate() */
721 saved_tb
= env
->current_tb
;
722 env
->current_tb
= NULL
;
724 tb_phys_invalidate(tb
, -1);
726 env
->current_tb
= saved_tb
;
727 if (env
->interrupt_request
&& env
->current_tb
)
728 cpu_interrupt(env
, env
->interrupt_request
);
733 #if !defined(CONFIG_USER_ONLY)
734 /* if no code remaining, no need to continue to use slow writes */
736 invalidate_page_bitmap(p
);
737 if (is_cpu_write_access
) {
738 tlb_unprotect_code_phys(env
, start
, env
->mem_write_vaddr
);
742 #ifdef TARGET_HAS_PRECISE_SMC
743 if (current_tb_modified
) {
744 /* we generate a block containing just the instruction
745 modifying the memory. It will ensure that it cannot modify
747 env
->current_tb
= NULL
;
748 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
750 cpu_resume_from_signal(env
, NULL
);
755 /* len must be <= 8 and start must be a multiple of len */
756 static inline void tb_invalidate_phys_page_fast(target_ulong start
, int len
)
763 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
764 cpu_single_env
->mem_write_vaddr
, len
,
766 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
770 p
= page_find(start
>> TARGET_PAGE_BITS
);
773 if (p
->code_bitmap
) {
774 offset
= start
& ~TARGET_PAGE_MASK
;
775 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
776 if (b
& ((1 << len
) - 1))
780 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
784 #if !defined(CONFIG_SOFTMMU)
785 static void tb_invalidate_phys_page(target_ulong addr
,
786 unsigned long pc
, void *puc
)
788 int n
, current_flags
, current_tb_modified
;
789 target_ulong current_pc
, current_cs_base
;
791 TranslationBlock
*tb
, *current_tb
;
792 #ifdef TARGET_HAS_PRECISE_SMC
793 CPUState
*env
= cpu_single_env
;
796 addr
&= TARGET_PAGE_MASK
;
797 p
= page_find(addr
>> TARGET_PAGE_BITS
);
801 current_tb_modified
= 0;
803 current_pc
= 0; /* avoid warning */
804 current_cs_base
= 0; /* avoid warning */
805 current_flags
= 0; /* avoid warning */
806 #ifdef TARGET_HAS_PRECISE_SMC
808 current_tb
= tb_find_pc(pc
);
813 tb
= (TranslationBlock
*)((long)tb
& ~3);
814 #ifdef TARGET_HAS_PRECISE_SMC
815 if (current_tb
== tb
&&
816 !(current_tb
->cflags
& CF_SINGLE_INSN
)) {
817 /* If we are modifying the current TB, we must stop
818 its execution. We could be more precise by checking
819 that the modification is after the current PC, but it
820 would require a specialized function to partially
821 restore the CPU state */
823 current_tb_modified
= 1;
824 cpu_restore_state(current_tb
, env
, pc
, puc
);
825 #if defined(TARGET_I386)
826 current_flags
= env
->hflags
;
827 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
828 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
829 current_pc
= current_cs_base
+ env
->eip
;
831 #error unsupported CPU
834 #endif /* TARGET_HAS_PRECISE_SMC */
835 tb_phys_invalidate(tb
, addr
);
836 tb
= tb
->page_next
[n
];
839 #ifdef TARGET_HAS_PRECISE_SMC
840 if (current_tb_modified
) {
841 /* we generate a block containing just the instruction
842 modifying the memory. It will ensure that it cannot modify
844 env
->current_tb
= NULL
;
845 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
,
847 cpu_resume_from_signal(env
, puc
);
853 /* add the tb in the target page and protect it if necessary */
854 static inline void tb_alloc_page(TranslationBlock
*tb
,
855 unsigned int n
, target_ulong page_addr
)
858 TranslationBlock
*last_first_tb
;
860 tb
->page_addr
[n
] = page_addr
;
861 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
862 tb
->page_next
[n
] = p
->first_tb
;
863 last_first_tb
= p
->first_tb
;
864 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
865 invalidate_page_bitmap(p
);
867 #if defined(TARGET_HAS_SMC) || 1
869 #if defined(CONFIG_USER_ONLY)
870 if (p
->flags
& PAGE_WRITE
) {
875 /* force the host page as non writable (writes will have a
876 page fault + mprotect overhead) */
877 page_addr
&= qemu_host_page_mask
;
879 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
880 addr
+= TARGET_PAGE_SIZE
) {
882 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
886 p2
->flags
&= ~PAGE_WRITE
;
887 page_get_flags(addr
);
889 mprotect(g2h(page_addr
), qemu_host_page_size
,
890 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
891 #ifdef DEBUG_TB_INVALIDATE
892 printf("protecting code page: 0x%08lx\n",
897 /* if some code is already present, then the pages are already
898 protected. So we handle the case where only the first TB is
899 allocated in a physical page */
900 if (!last_first_tb
) {
901 tlb_protect_code(page_addr
);
905 #endif /* TARGET_HAS_SMC */
908 /* Allocate a new translation block. Flush the translation buffer if
909 too many translation blocks or too much generated code. */
910 TranslationBlock
*tb_alloc(target_ulong pc
)
912 TranslationBlock
*tb
;
914 if (nb_tbs
>= CODE_GEN_MAX_BLOCKS
||
915 (code_gen_ptr
- code_gen_buffer
) >= CODE_GEN_BUFFER_MAX_SIZE
)
923 /* add a new TB and link it to the physical page tables. phys_page2 is
924 (-1) to indicate that only one page contains the TB. */
925 void tb_link_phys(TranslationBlock
*tb
,
926 target_ulong phys_pc
, target_ulong phys_page2
)
929 TranslationBlock
**ptb
;
931 /* add in the physical hash table */
932 h
= tb_phys_hash_func(phys_pc
);
933 ptb
= &tb_phys_hash
[h
];
934 tb
->phys_hash_next
= *ptb
;
937 /* add in the page list */
938 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
939 if (phys_page2
!= -1)
940 tb_alloc_page(tb
, 1, phys_page2
);
942 tb
->page_addr
[1] = -1;
944 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
945 tb
->jmp_next
[0] = NULL
;
946 tb
->jmp_next
[1] = NULL
;
948 tb
->cflags
&= ~CF_FP_USED
;
949 if (tb
->cflags
& CF_TB_FP_USED
)
950 tb
->cflags
|= CF_FP_USED
;
953 /* init original jump addresses */
954 if (tb
->tb_next_offset
[0] != 0xffff)
955 tb_reset_jump(tb
, 0);
956 if (tb
->tb_next_offset
[1] != 0xffff)
957 tb_reset_jump(tb
, 1);
959 #ifdef DEBUG_TB_CHECK
964 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
965 tb[1].tc_ptr. Return NULL if not found */
966 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
970 TranslationBlock
*tb
;
974 if (tc_ptr
< (unsigned long)code_gen_buffer
||
975 tc_ptr
>= (unsigned long)code_gen_ptr
)
977 /* binary search (cf Knuth) */
980 while (m_min
<= m_max
) {
981 m
= (m_min
+ m_max
) >> 1;
983 v
= (unsigned long)tb
->tc_ptr
;
986 else if (tc_ptr
< v
) {
995 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
997 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
999 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1002 tb1
= tb
->jmp_next
[n
];
1004 /* find head of list */
1007 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1010 tb1
= tb1
->jmp_next
[n1
];
1012 /* we are now sure now that tb jumps to tb1 */
1015 /* remove tb from the jmp_first list */
1016 ptb
= &tb_next
->jmp_first
;
1020 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1021 if (n1
== n
&& tb1
== tb
)
1023 ptb
= &tb1
->jmp_next
[n1
];
1025 *ptb
= tb
->jmp_next
[n
];
1026 tb
->jmp_next
[n
] = NULL
;
1028 /* suppress the jump to next tb in generated code */
1029 tb_reset_jump(tb
, n
);
1031 /* suppress jumps in the tb on which we could have jumped */
1032 tb_reset_jump_recursive(tb_next
);
1036 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1038 tb_reset_jump_recursive2(tb
, 0);
1039 tb_reset_jump_recursive2(tb
, 1);
1042 #if defined(TARGET_HAS_ICE)
1043 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1045 target_phys_addr_t addr
;
1047 ram_addr_t ram_addr
;
1050 addr
= cpu_get_phys_page_debug(env
, pc
);
1051 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1053 pd
= IO_MEM_UNASSIGNED
;
1055 pd
= p
->phys_offset
;
1057 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1058 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1062 /* Add a watchpoint. */
1063 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
)
1067 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1068 if (addr
== env
->watchpoint
[i
].vaddr
)
1071 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1074 i
= env
->nb_watchpoints
++;
1075 env
->watchpoint
[i
].vaddr
= addr
;
1076 tlb_flush_page(env
, addr
);
1077 /* FIXME: This flush is needed because of the hack to make memory ops
1078 terminate the TB. It can be removed once the proper IO trap and
1079 re-execute bits are in. */
1084 /* Remove a watchpoint. */
1085 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1089 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1090 if (addr
== env
->watchpoint
[i
].vaddr
) {
1091 env
->nb_watchpoints
--;
1092 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1093 tlb_flush_page(env
, addr
);
1100 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1101 breakpoint is reached */
1102 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1104 #if defined(TARGET_HAS_ICE)
1107 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1108 if (env
->breakpoints
[i
] == pc
)
1112 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1114 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1116 breakpoint_invalidate(env
, pc
);
1123 /* remove a breakpoint */
1124 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1126 #if defined(TARGET_HAS_ICE)
1128 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1129 if (env
->breakpoints
[i
] == pc
)
1134 env
->nb_breakpoints
--;
1135 if (i
< env
->nb_breakpoints
)
1136 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1138 breakpoint_invalidate(env
, pc
);
1145 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1146 CPU loop after each instruction */
1147 void cpu_single_step(CPUState
*env
, int enabled
)
1149 #if defined(TARGET_HAS_ICE)
1150 if (env
->singlestep_enabled
!= enabled
) {
1151 env
->singlestep_enabled
= enabled
;
/* must flush all the translated code to avoid inconsistencies */
1153 /* XXX: only flush what is necessary */
1159 /* enable or disable low levels log */
1160 void cpu_set_log(int log_flags
)
1162 loglevel
= log_flags
;
1163 if (loglevel
&& !logfile
) {
1164 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1166 perror(logfilename
);
1169 #if !defined(CONFIG_SOFTMMU)
1170 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1172 static uint8_t logfile_buf
[4096];
1173 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1176 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1180 if (!loglevel
&& logfile
) {
1186 void cpu_set_log_filename(const char *filename
)
1188 logfilename
= strdup(filename
);
1193 cpu_set_log(loglevel
);
1196 /* mask must never be zero, except for A20 change call */
1197 void cpu_interrupt(CPUState
*env
, int mask
)
1199 TranslationBlock
*tb
;
1200 static int interrupt_lock
;
1202 env
->interrupt_request
|= mask
;
1203 /* if the cpu is currently executing code, we must unlink it and
1204 all the potentially executing TB */
1205 tb
= env
->current_tb
;
1206 if (tb
&& !testandset(&interrupt_lock
)) {
1207 env
->current_tb
= NULL
;
1208 tb_reset_jump_recursive(tb
);
1213 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1215 env
->interrupt_request
&= ~mask
;
1218 CPULogItem cpu_log_items
[] = {
1219 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1220 "show generated host assembly code for each compiled TB" },
1221 { CPU_LOG_TB_IN_ASM
, "in_asm",
1222 "show target assembly code for each compiled TB" },
1223 { CPU_LOG_TB_OP
, "op",
1224 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1226 { CPU_LOG_TB_OP_OPT
, "op_opt",
1227 "show micro ops after optimization for each compiled TB" },
1229 { CPU_LOG_INT
, "int",
1230 "show interrupts/exceptions in short format" },
1231 { CPU_LOG_EXEC
, "exec",
1232 "show trace before each executed TB (lots of logs)" },
1233 { CPU_LOG_TB_CPU
, "cpu",
1234 "show CPU state before block translation" },
1236 { CPU_LOG_PCALL
, "pcall",
1237 "show protected mode far calls/returns/exceptions" },
1240 { CPU_LOG_IOPORT
, "ioport",
1241 "show all i/o ports accesses" },
1246 static int cmp1(const char *s1
, int n
, const char *s2
)
1248 if (strlen(s2
) != n
)
1250 return memcmp(s1
, s2
, n
) == 0;
1253 /* takes a comma separated list of log masks. Return 0 if error. */
1254 int cpu_str_to_log_mask(const char *str
)
1263 p1
= strchr(p
, ',');
1266 if(cmp1(p
,p1
-p
,"all")) {
1267 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1271 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1272 if (cmp1(p
, p1
- p
, item
->name
))
1286 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1291 fprintf(stderr
, "qemu: fatal: ");
1292 vfprintf(stderr
, fmt
, ap
);
1293 fprintf(stderr
, "\n");
1295 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1297 cpu_dump_state(env
, stderr
, fprintf
, 0);
1307 CPUState
*cpu_copy(CPUState
*env
)
1309 CPUState
*new_env
= cpu_init();
1310 /* preserve chaining and index */
1311 CPUState
*next_cpu
= new_env
->next_cpu
;
1312 int cpu_index
= new_env
->cpu_index
;
1313 memcpy(new_env
, env
, sizeof(CPUState
));
1314 new_env
->next_cpu
= next_cpu
;
1315 new_env
->cpu_index
= cpu_index
;
1319 #if !defined(CONFIG_USER_ONLY)
1321 /* NOTE: if flush_global is true, also flush global entries (not
1323 void tlb_flush(CPUState
*env
, int flush_global
)
1327 #if defined(DEBUG_TLB)
1328 printf("tlb_flush:\n");
1330 /* must reset current TB so that interrupts cannot modify the
1331 links while we are modifying them */
1332 env
->current_tb
= NULL
;
1334 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1335 env
->tlb_table
[0][i
].addr_read
= -1;
1336 env
->tlb_table
[0][i
].addr_write
= -1;
1337 env
->tlb_table
[0][i
].addr_code
= -1;
1338 env
->tlb_table
[1][i
].addr_read
= -1;
1339 env
->tlb_table
[1][i
].addr_write
= -1;
1340 env
->tlb_table
[1][i
].addr_code
= -1;
1341 #if (NB_MMU_MODES >= 3)
1342 env
->tlb_table
[2][i
].addr_read
= -1;
1343 env
->tlb_table
[2][i
].addr_write
= -1;
1344 env
->tlb_table
[2][i
].addr_code
= -1;
1345 #if (NB_MMU_MODES == 4)
1346 env
->tlb_table
[3][i
].addr_read
= -1;
1347 env
->tlb_table
[3][i
].addr_write
= -1;
1348 env
->tlb_table
[3][i
].addr_code
= -1;
1353 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1355 #if !defined(CONFIG_SOFTMMU)
1356 munmap((void *)MMAP_AREA_START
, MMAP_AREA_END
- MMAP_AREA_START
);
1359 if (env
->kqemu_enabled
) {
1360 kqemu_flush(env
, flush_global
);
1366 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1368 if (addr
== (tlb_entry
->addr_read
&
1369 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1370 addr
== (tlb_entry
->addr_write
&
1371 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1372 addr
== (tlb_entry
->addr_code
&
1373 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1374 tlb_entry
->addr_read
= -1;
1375 tlb_entry
->addr_write
= -1;
1376 tlb_entry
->addr_code
= -1;
1380 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1383 TranslationBlock
*tb
;
1385 #if defined(DEBUG_TLB)
1386 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1388 /* must reset current TB so that interrupts cannot modify the
1389 links while we are modifying them */
1390 env
->current_tb
= NULL
;
1392 addr
&= TARGET_PAGE_MASK
;
1393 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1394 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1395 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1396 #if (NB_MMU_MODES >= 3)
1397 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1398 #if (NB_MMU_MODES == 4)
1399 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1403 /* Discard jump cache entries for any tb which might potentially
1404 overlap the flushed page. */
1405 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1406 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1408 i
= tb_jmp_cache_hash_page(addr
);
1409 memset (&env
->tb_jmp_cache
[i
], 0, TB_JMP_PAGE_SIZE
* sizeof(tb
));
1411 #if !defined(CONFIG_SOFTMMU)
1412 if (addr
< MMAP_AREA_END
)
1413 munmap((void *)addr
, TARGET_PAGE_SIZE
);
1416 if (env
->kqemu_enabled
) {
1417 kqemu_flush_page(env
, addr
);
1422 /* update the TLBs so that writes to code in the virtual page 'addr'
1424 static void tlb_protect_code(ram_addr_t ram_addr
)
1426 cpu_physical_memory_reset_dirty(ram_addr
,
1427 ram_addr
+ TARGET_PAGE_SIZE
,
1431 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1432 tested for self modifying code */
1433 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1436 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1439 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1440 unsigned long start
, unsigned long length
)
1443 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1444 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1445 if ((addr
- start
) < length
) {
1446 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_NOTDIRTY
;
1451 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1455 unsigned long length
, start1
;
1459 start
&= TARGET_PAGE_MASK
;
1460 end
= TARGET_PAGE_ALIGN(end
);
1462 length
= end
- start
;
1465 len
= length
>> TARGET_PAGE_BITS
;
1467 /* XXX: should not depend on cpu context */
1469 if (env
->kqemu_enabled
) {
1472 for(i
= 0; i
< len
; i
++) {
1473 kqemu_set_notdirty(env
, addr
);
1474 addr
+= TARGET_PAGE_SIZE
;
1478 mask
= ~dirty_flags
;
1479 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1480 for(i
= 0; i
< len
; i
++)
1483 /* we modify the TLB cache so that the dirty bit will be set again
1484 when accessing the range */
1485 start1
= start
+ (unsigned long)phys_ram_base
;
1486 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1487 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1488 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1489 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1490 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1491 #if (NB_MMU_MODES >= 3)
1492 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1493 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1494 #if (NB_MMU_MODES == 4)
1495 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1496 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1501 #if !defined(CONFIG_SOFTMMU)
1502 /* XXX: this is expensive */
1508 for(i
= 0; i
< L1_SIZE
; i
++) {
1511 addr
= i
<< (TARGET_PAGE_BITS
+ L2_BITS
);
1512 for(j
= 0; j
< L2_SIZE
; j
++) {
1513 if (p
->valid_tag
== virt_valid_tag
&&
1514 p
->phys_addr
>= start
&& p
->phys_addr
< end
&&
1515 (p
->prot
& PROT_WRITE
)) {
1516 if (addr
< MMAP_AREA_END
) {
1517 mprotect((void *)addr
, TARGET_PAGE_SIZE
,
1518 p
->prot
& ~PROT_WRITE
);
1521 addr
+= TARGET_PAGE_SIZE
;
1530 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1532 ram_addr_t ram_addr
;
1534 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1535 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1536 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1537 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1538 tlb_entry
->addr_write
|= IO_MEM_NOTDIRTY
;
1543 /* update the TLB according to the current state of the dirty bits */
1544 void cpu_tlb_update_dirty(CPUState
*env
)
1547 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1548 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1549 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1550 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1551 #if (NB_MMU_MODES >= 3)
1552 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1553 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1554 #if (NB_MMU_MODES == 4)
1555 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1556 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1561 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
,
1562 unsigned long start
)
1565 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_NOTDIRTY
) {
1566 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1567 if (addr
== start
) {
1568 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | IO_MEM_RAM
;
1573 /* update the TLB corresponding to virtual page vaddr and phys addr
1574 addr so that it is no longer dirty */
1575 static inline void tlb_set_dirty(CPUState
*env
,
1576 unsigned long addr
, target_ulong vaddr
)
1580 addr
&= TARGET_PAGE_MASK
;
1581 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1582 tlb_set_dirty1(&env
->tlb_table
[0][i
], addr
);
1583 tlb_set_dirty1(&env
->tlb_table
[1][i
], addr
);
1584 #if (NB_MMU_MODES >= 3)
1585 tlb_set_dirty1(&env
->tlb_table
[2][i
], addr
);
1586 #if (NB_MMU_MODES == 4)
1587 tlb_set_dirty1(&env
->tlb_table
[3][i
], addr
);
1592 /* add a new TLB entry. At most one entry for a given virtual address
1593 is permitted. Return 0 if OK or 2 if the page could not be mapped
1594 (can only happen in non SOFTMMU mode for I/O pages or pages
1595 conflicting with the host address space). */
1596 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1597 target_phys_addr_t paddr
, int prot
,
1598 int is_user
, int is_softmmu
)
1603 target_ulong address
;
1604 target_phys_addr_t addend
;
1609 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1611 pd
= IO_MEM_UNASSIGNED
;
1613 pd
= p
->phys_offset
;
1615 #if defined(DEBUG_TLB)
1616 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1617 vaddr
, (int)paddr
, prot
, is_user
, is_softmmu
, pd
);
1621 #if !defined(CONFIG_SOFTMMU)
1625 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1626 /* IO memory case */
1627 address
= vaddr
| pd
;
1630 /* standard memory */
1632 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1635 /* Make accesses to pages with watchpoints go via the
1636 watchpoint trap routines. */
1637 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1638 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1639 if (address
& ~TARGET_PAGE_MASK
) {
1640 env
->watchpoint
[i
].addend
= 0;
1641 address
= vaddr
| io_mem_watch
;
1643 env
->watchpoint
[i
].addend
= pd
- paddr
+
1644 (unsigned long) phys_ram_base
;
1645 /* TODO: Figure out how to make read watchpoints coexist
1647 pd
= (pd
& TARGET_PAGE_MASK
) | io_mem_watch
| IO_MEM_ROMD
;
1652 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1654 te
= &env
->tlb_table
[is_user
][index
];
1655 te
->addend
= addend
;
1656 if (prot
& PAGE_READ
) {
1657 te
->addr_read
= address
;
1661 if (prot
& PAGE_EXEC
) {
1662 te
->addr_code
= address
;
1666 if (prot
& PAGE_WRITE
) {
1667 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1668 (pd
& IO_MEM_ROMD
)) {
1669 /* write access calls the I/O callback */
1670 te
->addr_write
= vaddr
|
1671 (pd
& ~(TARGET_PAGE_MASK
| IO_MEM_ROMD
));
1672 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1673 !cpu_physical_memory_is_dirty(pd
)) {
1674 te
->addr_write
= vaddr
| IO_MEM_NOTDIRTY
;
1676 te
->addr_write
= address
;
1679 te
->addr_write
= -1;
1682 #if !defined(CONFIG_SOFTMMU)
1684 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
) {
1685 /* IO access: no mapping is done as it will be handled by the
1687 if (!(env
->hflags
& HF_SOFTMMU_MASK
))
1692 if (vaddr
>= MMAP_AREA_END
) {
1695 if (prot
& PROT_WRITE
) {
1696 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1697 #if defined(TARGET_HAS_SMC) || 1
1700 ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1701 !cpu_physical_memory_is_dirty(pd
))) {
1702 /* ROM: we do as if code was inside */
1703 /* if code is present, we only map as read only and save the
1707 vp
= virt_page_find_alloc(vaddr
>> TARGET_PAGE_BITS
, 1);
1710 vp
->valid_tag
= virt_valid_tag
;
1711 prot
&= ~PAGE_WRITE
;
1714 map_addr
= mmap((void *)vaddr
, TARGET_PAGE_SIZE
, prot
,
1715 MAP_SHARED
| MAP_FIXED
, phys_ram_fd
, (pd
& TARGET_PAGE_MASK
));
1716 if (map_addr
== MAP_FAILED
) {
1717 cpu_abort(env
, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1729 int page_unprotect(target_ulong addr
, unsigned long pc
, void *puc
)
1731 #if !defined(CONFIG_SOFTMMU)
1734 #if defined(DEBUG_TLB)
1735 printf("page_unprotect: addr=0x%08x\n", addr
);
1737 addr
&= TARGET_PAGE_MASK
;
1739 /* if it is not mapped, no need to worry here */
1740 if (addr
>= MMAP_AREA_END
)
1742 vp
= virt_page_find(addr
>> TARGET_PAGE_BITS
);
1745 /* NOTE: in this case, validate_tag is _not_ tested as it
1746 validates only the code TLB */
1747 if (vp
->valid_tag
!= virt_valid_tag
)
1749 if (!(vp
->prot
& PAGE_WRITE
))
1751 #if defined(DEBUG_TLB)
1752 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1753 addr
, vp
->phys_addr
, vp
->prot
);
1755 if (mprotect((void *)addr
, TARGET_PAGE_SIZE
, vp
->prot
) < 0)
1756 cpu_abort(cpu_single_env
, "error mprotect addr=0x%lx prot=%d\n",
1757 (unsigned long)addr
, vp
->prot
);
1758 /* set the dirty bit */
1759 phys_ram_dirty
[vp
->phys_addr
>> TARGET_PAGE_BITS
] = 0xff;
1760 /* flush the code inside */
1761 tb_invalidate_phys_page(vp
->phys_addr
, pc
, puc
);
1770 void tlb_flush(CPUState
*env
, int flush_global
)
1774 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1778 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1779 target_phys_addr_t paddr
, int prot
,
1780 int is_user
, int is_softmmu
)
1785 /* dump memory mappings */
1786 void page_dump(FILE *f
)
1788 unsigned long start
, end
;
1789 int i
, j
, prot
, prot1
;
1792 fprintf(f
, "%-8s %-8s %-8s %s\n",
1793 "start", "end", "size", "prot");
1797 for(i
= 0; i
<= L1_SIZE
; i
++) {
1802 for(j
= 0;j
< L2_SIZE
; j
++) {
1807 if (prot1
!= prot
) {
1808 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1810 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1811 start
, end
, end
- start
,
1812 prot
& PAGE_READ
? 'r' : '-',
1813 prot
& PAGE_WRITE
? 'w' : '-',
1814 prot
& PAGE_EXEC
? 'x' : '-');
1828 int page_get_flags(target_ulong address
)
1832 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
1841 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1846 start
= start
& TARGET_PAGE_MASK
;
1847 end
= TARGET_PAGE_ALIGN(end
);
1848 if (flags
& PAGE_WRITE
)
1849 flags
|= PAGE_WRITE_ORG
;
1850 spin_lock(&tb_lock
);
1851 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1852 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1853 /* if the write protection is set, then we invalidate the code
1855 if (!(p
->flags
& PAGE_WRITE
) &&
1856 (flags
& PAGE_WRITE
) &&
1858 tb_invalidate_phys_page(addr
, 0, NULL
);
1862 spin_unlock(&tb_lock
);
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1867 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
1869 unsigned int page_index
, prot
, pindex
;
1871 target_ulong host_start
, host_end
, addr
;
1873 host_start
= address
& qemu_host_page_mask
;
1874 page_index
= host_start
>> TARGET_PAGE_BITS
;
1875 p1
= page_find(page_index
);
1878 host_end
= host_start
+ qemu_host_page_size
;
1881 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
1885 /* if the page was really writable, then we change its
1886 protection back to writable */
1887 if (prot
& PAGE_WRITE_ORG
) {
1888 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
1889 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
1890 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
1891 (prot
& PAGE_BITS
) | PAGE_WRITE
);
1892 p1
[pindex
].flags
|= PAGE_WRITE
;
1893 /* and since the content will be modified, we must invalidate
1894 the corresponding translated code. */
1895 tb_invalidate_phys_page(address
, pc
, puc
);
1896 #ifdef DEBUG_TB_CHECK
1897 tb_invalidate_check(address
);
1905 /* call this function when system calls directly modify a memory area */
1906 /* ??? This should be redundant now we have lock_user. */
1907 void page_unprotect_range(target_ulong data
, target_ulong data_size
)
1909 target_ulong start
, end
, addr
;
1912 end
= start
+ data_size
;
1913 start
&= TARGET_PAGE_MASK
;
1914 end
= TARGET_PAGE_ALIGN(end
);
1915 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1916 page_unprotect(addr
, 0, NULL
);
1920 static inline void tlb_set_dirty(CPUState
*env
,
1921 unsigned long addr
, target_ulong vaddr
)
1924 #endif /* defined(CONFIG_USER_ONLY) */
1926 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
1928 static void *subpage_init (target_phys_addr_t base
, uint32_t *phys
,
1930 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1933 if (addr > start_addr) \
1936 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
1937 if (start_addr2 > 0) \
1941 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
1942 end_addr2 = TARGET_PAGE_SIZE - 1; \
1944 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1945 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
1950 /* register physical memory. 'size' must be a multiple of the target
1951 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1953 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
1955 unsigned long phys_offset
)
1957 target_phys_addr_t addr
, end_addr
;
1960 unsigned long orig_size
= size
;
1963 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
1964 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
1965 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
1966 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1967 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
1968 unsigned long orig_memory
= p
->phys_offset
;
1969 target_phys_addr_t start_addr2
, end_addr2
;
1970 int need_subpage
= 0;
1972 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
1975 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
1976 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
1977 &p
->phys_offset
, orig_memory
);
1979 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
1982 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
1984 p
->phys_offset
= phys_offset
;
1985 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
1986 (phys_offset
& IO_MEM_ROMD
))
1987 phys_offset
+= TARGET_PAGE_SIZE
;
1990 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
1991 p
->phys_offset
= phys_offset
;
1992 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
1993 (phys_offset
& IO_MEM_ROMD
))
1994 phys_offset
+= TARGET_PAGE_SIZE
;
1996 target_phys_addr_t start_addr2
, end_addr2
;
1997 int need_subpage
= 0;
1999 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2000 end_addr2
, need_subpage
);
2003 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2004 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2005 subpage_register(subpage
, start_addr2
, end_addr2
,
2012 /* since each CPU stores ram addresses in its TLB cache, we must
2013 reset the modified entries */
2015 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2020 /* XXX: temporary until new memory mapping API */
2021 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr
)
2025 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2027 return IO_MEM_UNASSIGNED
;
2028 return p
->phys_offset
;
2031 /* XXX: better than nothing */
2032 ram_addr_t
qemu_ram_alloc(unsigned int size
)
2035 if ((phys_ram_alloc_offset
+ size
) >= phys_ram_size
) {
2036 fprintf(stderr
, "Not enough memory (requested_size = %u, max memory = %d)\n",
2037 size
, phys_ram_size
);
2040 addr
= phys_ram_alloc_offset
;
2041 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2045 void qemu_ram_free(ram_addr_t addr
)
2049 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2051 #ifdef DEBUG_UNASSIGNED
2052 printf("Unassigned mem read " TARGET_FMT_lx
"\n", addr
);
2055 do_unassigned_access(addr
, 0, 0, 0);
2060 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2062 #ifdef DEBUG_UNASSIGNED
2063 printf("Unassigned mem write " TARGET_FMT_lx
" = 0x%x\n", addr
, val
);
2066 do_unassigned_access(addr
, 1, 0, 0);
2070 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2071 unassigned_mem_readb
,
2072 unassigned_mem_readb
,
2073 unassigned_mem_readb
,
2076 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2077 unassigned_mem_writeb
,
2078 unassigned_mem_writeb
,
2079 unassigned_mem_writeb
,
2082 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2084 unsigned long ram_addr
;
2086 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2087 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2088 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2089 #if !defined(CONFIG_USER_ONLY)
2090 tb_invalidate_phys_page_fast(ram_addr
, 1);
2091 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2094 stb_p((uint8_t *)(long)addr
, val
);
2096 if (cpu_single_env
->kqemu_enabled
&&
2097 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2098 kqemu_modify_page(cpu_single_env
, ram_addr
);
2100 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2101 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2102 /* we remove the notdirty callback only if the code has been
2104 if (dirty_flags
== 0xff)
2105 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2108 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2110 unsigned long ram_addr
;
2112 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2113 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2114 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2115 #if !defined(CONFIG_USER_ONLY)
2116 tb_invalidate_phys_page_fast(ram_addr
, 2);
2117 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2120 stw_p((uint8_t *)(long)addr
, val
);
2122 if (cpu_single_env
->kqemu_enabled
&&
2123 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2124 kqemu_modify_page(cpu_single_env
, ram_addr
);
2126 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2127 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2128 /* we remove the notdirty callback only if the code has been
2130 if (dirty_flags
== 0xff)
2131 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2134 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2136 unsigned long ram_addr
;
2138 ram_addr
= addr
- (unsigned long)phys_ram_base
;
2139 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2140 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2141 #if !defined(CONFIG_USER_ONLY)
2142 tb_invalidate_phys_page_fast(ram_addr
, 4);
2143 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2146 stl_p((uint8_t *)(long)addr
, val
);
2148 if (cpu_single_env
->kqemu_enabled
&&
2149 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2150 kqemu_modify_page(cpu_single_env
, ram_addr
);
2152 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2153 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2154 /* we remove the notdirty callback only if the code has been
2156 if (dirty_flags
== 0xff)
2157 tlb_set_dirty(cpu_single_env
, addr
, cpu_single_env
->mem_write_vaddr
);
2160 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2161 NULL
, /* never used */
2162 NULL
, /* never used */
2163 NULL
, /* never used */
2166 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2167 notdirty_mem_writeb
,
2168 notdirty_mem_writew
,
2169 notdirty_mem_writel
,
2172 #if defined(CONFIG_SOFTMMU)
2173 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2174 so these check for a hit then pass through to the normal out-of-line
2176 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2178 return ldub_phys(addr
);
2181 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2183 return lduw_phys(addr
);
2186 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2188 return ldl_phys(addr
);
2191 /* Generate a debug exception if a watchpoint has been hit.
2192 Returns the real physical address of the access. addr will be a host
2193 address in case of a RAM location. */
2194 static target_ulong
check_watchpoint(target_phys_addr_t addr
)
2196 CPUState
*env
= cpu_single_env
;
2198 target_ulong retaddr
;
2202 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2203 watch
= env
->watchpoint
[i
].vaddr
;
2204 if (((env
->mem_write_vaddr
^ watch
) & TARGET_PAGE_MASK
) == 0) {
2205 retaddr
= addr
- env
->watchpoint
[i
].addend
;
2206 if (((addr
^ watch
) & ~TARGET_PAGE_MASK
) == 0) {
2207 cpu_single_env
->watchpoint_hit
= i
+ 1;
2208 cpu_interrupt(cpu_single_env
, CPU_INTERRUPT_DEBUG
);
2216 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2219 addr
= check_watchpoint(addr
);
2220 stb_phys(addr
, val
);
2223 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2226 addr
= check_watchpoint(addr
);
2227 stw_phys(addr
, val
);
2230 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2233 addr
= check_watchpoint(addr
);
2234 stl_phys(addr
, val
);
2237 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2243 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2250 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2253 CPUReadMemoryFunc
**mem_read
;
2257 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2258 #if defined(DEBUG_SUBPAGE)
2259 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2260 mmio
, len
, addr
, idx
);
2262 mem_read
= mmio
->mem_read
[idx
];
2263 ret
= (*mem_read
[len
])(mmio
->opaque
[idx
], addr
);
2268 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2269 uint32_t value
, unsigned int len
)
2271 CPUWriteMemoryFunc
**mem_write
;
2274 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2275 #if defined(DEBUG_SUBPAGE)
2276 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2277 mmio
, len
, addr
, idx
, value
);
2279 mem_write
= mmio
->mem_write
[idx
];
2280 (*mem_write
[len
])(mmio
->opaque
[idx
], addr
, value
);
2283 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2285 #if defined(DEBUG_SUBPAGE)
2286 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2289 return subpage_readlen(opaque
, addr
, 0);
2292 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2295 #if defined(DEBUG_SUBPAGE)
2296 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2298 subpage_writelen(opaque
, addr
, value
, 0);
2301 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2303 #if defined(DEBUG_SUBPAGE)
2304 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2307 return subpage_readlen(opaque
, addr
, 1);
2310 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2313 #if defined(DEBUG_SUBPAGE)
2314 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2316 subpage_writelen(opaque
, addr
, value
, 1);
2319 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2321 #if defined(DEBUG_SUBPAGE)
2322 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2325 return subpage_readlen(opaque
, addr
, 2);
2328 static void subpage_writel (void *opaque
,
2329 target_phys_addr_t addr
, uint32_t value
)
2331 #if defined(DEBUG_SUBPAGE)
2332 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2334 subpage_writelen(opaque
, addr
, value
, 2);
2337 static CPUReadMemoryFunc
*subpage_read
[] = {
2343 static CPUWriteMemoryFunc
*subpage_write
[] = {
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
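/* Illustrative sketch (not part of the original file): a minimal MMIO
   region registered through cpu_register_io_memory() and then mapped
   with the cpu_register_physical_memory() helper defined elsewhere in
   this file.  The "demo" device, its base address and the backing regs
   array are hypothetical; the block only shows how the byte/word/dword
   callback tables and the opaque pointer are wired up. */
#if 0
static uint32_t demo_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t *regs = opaque;
    /* word index inside an assumed 4KB register window */
    return regs[(addr & 0xfff) >> 2];
}

static void demo_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    uint32_t *regs = opaque;
    regs[(addr & 0xfff) >> 2] = val;
}

/* byte and word accesses are routed to the same handler for brevity */
static CPUReadMemoryFunc *demo_mmio_read[3] = {
    demo_mmio_readl,
    demo_mmio_readl,
    demo_mmio_readl,
};

static CPUWriteMemoryFunc *demo_mmio_write[3] = {
    demo_mmio_writel,
    demo_mmio_writel,
    demo_mmio_writel,
};

static void demo_mmio_map(uint32_t *regs, target_phys_addr_t base)
{
    int io;

    /* io_index 0 requests a new slot; the return value already has
       IO_MEM_SHIFT applied, so it can be passed directly as the
       phys_offset of the mapping. */
    io = cpu_register_io_memory(0, demo_mmio_read, demo_mmio_write, regs);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif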
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
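/* Illustrative sketch (not part of the original file): typical device-side
   use of the slow path above through the cpu_physical_memory_read()/write()
   wrappers used elsewhere in this file.  The "demo" DMA helper and its
   src/status addresses are hypothetical and only show the calling
   convention (guest physical address, host buffer, length). */
#if 0
static void demo_dma_transfer(target_phys_addr_t src,
                              target_phys_addr_t status, int len)
{
    uint8_t tmp[4096];
    uint32_t done = 1;

    if (len > (int)sizeof(tmp))
        len = sizeof(tmp);
    /* copy guest RAM (or MMIO) into a host buffer */
    cpu_physical_memory_read(src, tmp, len);
    /* ... process tmp ... */
    /* write a completion flag back to guest memory */
    cpu_physical_memory_write(status, (const uint8_t *)&done, sizeof(done));
}
#endif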
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
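/* Illustrative sketch (not part of the original file): the intended use of
   stl_phys_notdirty() as suggested by the comment above.  When target MMU
   emulation updates a bookkeeping bit inside a guest PTE, the _notdirty
   variant leaves the physical page's dirty bits and the translated code
   alone, whereas a normal guest-visible store would go through stl_phys().
   The helper and its pte_addr/pte arguments are hypothetical. */
#if 0
static void demo_set_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    /* software-managed PTE update: do not disturb dirty tracking */
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif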
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
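/* Illustrative sketch (not part of the original file): how a debugger front
   end (e.g. a gdb stub) might use cpu_memory_rw_debug() to read guest
   virtual memory.  The helper name, env pointer and vaddr are assumptions;
   only the argument order and the -1 error convention come from the
   function above. */
#if 0
static int demo_read_guest_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    /* is_write == 0: read sizeof(*out) bytes from the guest virtual address */
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, sizeof(*out), 0) < 0)
        return -1; /* no physical page mapped at vaddr */
    return 0;
}
#endif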
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif