2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
41 #if defined(CONFIG_USER_ONLY)
45 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_UNASSIGNED
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
62 #define SMC_BITMAP_USE_THRESHOLD 10
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 TranslationBlock
*tbs
;
86 int code_gen_max_blocks
;
87 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
92 uint8_t code_gen_prologue
[1024] __attribute__((aligned (32)));
93 uint8_t *code_gen_buffer
;
94 unsigned long code_gen_buffer_size
;
95 /* threshold to flush the translated code buffer */
96 unsigned long code_gen_buffer_max_size
;
97 uint8_t *code_gen_ptr
;
99 #if !defined(CONFIG_USER_ONLY)
100 ram_addr_t phys_ram_size
;
102 uint8_t *phys_ram_base
;
103 uint8_t *phys_ram_dirty
;
104 static ram_addr_t phys_ram_alloc_offset
= 0;
108 /* current CPU in the current thread. It is only valid inside
110 CPUState
*cpu_single_env
;
111 /* 0 = Do not count executed instructions.
112 1 = Precise instruction counting.
113 2 = Adaptive rate instruction counting. */
115 /* Current instruction counter. While executing translated code this may
116 include some instructions that have not yet been executed. */
119 typedef struct PageDesc
{
120 /* list of TBs intersecting this ram page */
121 TranslationBlock
*first_tb
;
122 /* in order to optimize self modifying code, we count the number
123 of lookups we do to a given page to use a bitmap */
124 unsigned int code_write_count
;
125 uint8_t *code_bitmap
;
126 #if defined(CONFIG_USER_ONLY)
131 typedef struct PhysPageDesc
{
132 /* offset in host memory of the page + io_index in the low bits */
133 ram_addr_t phys_offset
;
137 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
138 /* XXX: this is a temporary hack for alpha target.
139 * In the future, this is to be replaced by a multi-level table
140 * to actually be able to handle the complete 64 bits address space.
142 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
144 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
147 #define L1_SIZE (1 << L1_BITS)
148 #define L2_SIZE (1 << L2_BITS)
150 unsigned long qemu_real_host_page_size
;
151 unsigned long qemu_host_page_bits
;
152 unsigned long qemu_host_page_size
;
153 unsigned long qemu_host_page_mask
;
155 /* XXX: for system emulation, it could just be an array */
156 static PageDesc
*l1_map
[L1_SIZE
];
157 PhysPageDesc
**l1_phys_map
;
159 #if !defined(CONFIG_USER_ONLY)
160 static void io_mem_init(void);
162 /* io memory support */
163 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
164 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
165 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
166 static int io_mem_nb
;
167 static int io_mem_watch
;
171 char *logfilename
= "/tmp/qemu.log";
174 static int log_append
= 0;
177 static int tlb_flush_count
;
178 static int tb_flush_count
;
179 static int tb_phys_invalidate_count
;
181 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
182 typedef struct subpage_t
{
183 target_phys_addr_t base
;
184 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
185 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
186 void *opaque
[TARGET_PAGE_SIZE
][2][4];
/* Make the region [addr, addr+size) executable (and writable) on
   Windows by switching its protection with VirtualProtect.
   NOTE(review): the extraction dropped lines in this function (the
   opening brace and the DWORD old_protect declaration) — confirm
   against the full source before relying on this view. */
190 static void map_exec(void *addr
, long size
)
193 VirtualProtect(addr
, size
,
194 PAGE_EXECUTE_READWRITE
, &old_protect
);
198 static void map_exec(void *addr
, long size
)
200 unsigned long start
, end
, page_size
;
202 page_size
= getpagesize();
203 start
= (unsigned long)addr
;
204 start
&= ~(page_size
- 1);
206 end
= (unsigned long)addr
+ size
;
207 end
+= page_size
- 1;
208 end
&= ~(page_size
- 1);
210 mprotect((void *)start
, end
- start
,
211 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
215 static void page_init(void)
217 /* NOTE: we can always suppose that qemu_host_page_size >=
221 SYSTEM_INFO system_info
;
224 GetSystemInfo(&system_info
);
225 qemu_real_host_page_size
= system_info
.dwPageSize
;
228 qemu_real_host_page_size
= getpagesize();
230 if (qemu_host_page_size
== 0)
231 qemu_host_page_size
= qemu_real_host_page_size
;
232 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
233 qemu_host_page_size
= TARGET_PAGE_SIZE
;
234 qemu_host_page_bits
= 0;
235 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
236 qemu_host_page_bits
++;
237 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
238 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
239 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
241 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
243 long long startaddr
, endaddr
;
248 last_brk
= (unsigned long)sbrk(0);
249 f
= fopen("/proc/self/maps", "r");
252 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
254 startaddr
= MIN(startaddr
,
255 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
256 endaddr
= MIN(endaddr
,
257 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
258 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
259 TARGET_PAGE_ALIGN(endaddr
),
270 static inline PageDesc
*page_find_alloc(target_ulong index
)
274 #if TARGET_LONG_BITS > 32
275 /* Host memory outside guest VM. For 32-bit targets we have already
276 excluded high addresses. */
277 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
* TARGET_PAGE_SIZE
))
280 lp
= &l1_map
[index
>> L2_BITS
];
283 /* allocate if not found */
284 #if defined(CONFIG_USER_ONLY)
286 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
287 /* Don't use qemu_malloc because it may recurse. */
288 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
289 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
292 if (addr
== (target_ulong
)addr
) {
293 page_set_flags(addr
& TARGET_PAGE_MASK
,
294 TARGET_PAGE_ALIGN(addr
+ len
),
298 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
302 return p
+ (index
& (L2_SIZE
- 1));
305 static inline PageDesc
*page_find(target_ulong index
)
309 p
= l1_map
[index
>> L2_BITS
];
312 return p
+ (index
& (L2_SIZE
- 1));
315 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
320 p
= (void **)l1_phys_map
;
321 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
323 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
324 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
326 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
329 /* allocate if not found */
332 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
333 memset(p
, 0, sizeof(void *) * L1_SIZE
);
337 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
341 /* allocate if not found */
344 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
346 for (i
= 0; i
< L2_SIZE
; i
++)
347 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
349 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
352 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
354 return phys_page_find_alloc(index
, 0);
357 #if !defined(CONFIG_USER_ONLY)
358 static void tlb_protect_code(ram_addr_t ram_addr
);
359 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
361 #define mmap_lock() do { } while(0)
362 #define mmap_unlock() do { } while(0)
365 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
367 #if defined(CONFIG_USER_ONLY)
368 /* Currently it is not recommanded to allocate big chunks of data in
369 user mode. It will change when a dedicated libc will be used */
370 #define USE_STATIC_CODE_GEN_BUFFER
373 #ifdef USE_STATIC_CODE_GEN_BUFFER
374 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
377 void code_gen_alloc(unsigned long tb_size
)
379 #ifdef USE_STATIC_CODE_GEN_BUFFER
380 code_gen_buffer
= static_code_gen_buffer
;
381 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
382 map_exec(code_gen_buffer
, code_gen_buffer_size
);
384 code_gen_buffer_size
= tb_size
;
385 if (code_gen_buffer_size
== 0) {
386 #if defined(CONFIG_USER_ONLY)
387 /* in user mode, phys_ram_size is not meaningful */
388 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
390 /* XXX: needs ajustments */
391 code_gen_buffer_size
= (int)(phys_ram_size
/ 4);
394 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
395 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
396 /* The code gen buffer location may have constraints depending on
397 the host cpu and OS */
398 #if defined(__linux__)
401 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
402 #if defined(__x86_64__)
404 /* Cannot map more than that */
405 if (code_gen_buffer_size
> (800 * 1024 * 1024))
406 code_gen_buffer_size
= (800 * 1024 * 1024);
408 code_gen_buffer
= mmap(NULL
, code_gen_buffer_size
,
409 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
411 if (code_gen_buffer
== MAP_FAILED
) {
412 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
417 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
418 if (!code_gen_buffer
) {
419 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
422 map_exec(code_gen_buffer
, code_gen_buffer_size
);
424 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
425 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
426 code_gen_buffer_max_size
= code_gen_buffer_size
-
427 code_gen_max_block_size();
428 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
429 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
432 /* Must be called before using the QEMU cpus. 'tb_size' is the size
433 (in bytes) allocated to the translation buffer. Zero means default
435 void cpu_exec_init_all(unsigned long tb_size
)
438 code_gen_alloc(tb_size
);
439 code_gen_ptr
= code_gen_buffer
;
441 #if !defined(CONFIG_USER_ONLY)
446 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
448 #define CPU_COMMON_SAVE_VERSION 1
450 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
452 CPUState
*env
= opaque
;
454 qemu_put_be32s(f
, &env
->halted
);
455 qemu_put_be32s(f
, &env
->interrupt_request
);
458 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
460 CPUState
*env
= opaque
;
462 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
465 qemu_get_be32s(f
, &env
->halted
);
466 qemu_put_be32s(f
, &env
->interrupt_request
);
473 void cpu_exec_init(CPUState
*env
)
478 env
->next_cpu
= NULL
;
481 while (*penv
!= NULL
) {
482 penv
= (CPUState
**)&(*penv
)->next_cpu
;
485 env
->cpu_index
= cpu_index
;
486 env
->nb_watchpoints
= 0;
488 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
489 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
490 cpu_common_save
, cpu_common_load
, env
);
491 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
492 cpu_save
, cpu_load
, env
);
496 static inline void invalidate_page_bitmap(PageDesc
*p
)
498 if (p
->code_bitmap
) {
499 qemu_free(p
->code_bitmap
);
500 p
->code_bitmap
= NULL
;
502 p
->code_write_count
= 0;
505 /* set to NULL all the 'first_tb' fields in all PageDescs */
506 static void page_flush_tb(void)
511 for(i
= 0; i
< L1_SIZE
; i
++) {
514 for(j
= 0; j
< L2_SIZE
; j
++) {
516 invalidate_page_bitmap(p
);
523 /* flush all the translation blocks */
524 /* XXX: tb_flush is currently not thread safe */
525 void tb_flush(CPUState
*env1
)
528 #if defined(DEBUG_FLUSH)
529 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
530 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
532 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
534 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
535 cpu_abort(env1
, "Internal error: code buffer overflow\n");
539 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
540 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
543 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
546 code_gen_ptr
= code_gen_buffer
;
547 /* XXX: flush processor icache at this point if cache flush is
552 #ifdef DEBUG_TB_CHECK
554 static void tb_invalidate_check(target_ulong address
)
556 TranslationBlock
*tb
;
558 address
&= TARGET_PAGE_MASK
;
559 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
560 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
561 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
562 address
>= tb
->pc
+ tb
->size
)) {
563 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
564 address
, (long)tb
->pc
, tb
->size
);
570 /* verify that all the pages have correct rights for code */
571 static void tb_page_check(void)
573 TranslationBlock
*tb
;
574 int i
, flags1
, flags2
;
576 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
577 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
578 flags1
= page_get_flags(tb
->pc
);
579 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
580 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
581 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
582 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
588 void tb_jmp_check(TranslationBlock
*tb
)
590 TranslationBlock
*tb1
;
593 /* suppress any remaining jumps to this TB */
597 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
600 tb1
= tb1
->jmp_next
[n1
];
602 /* check end of list */
604 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
610 /* invalidate one TB */
611 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
614 TranslationBlock
*tb1
;
618 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
621 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
625 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
627 TranslationBlock
*tb1
;
633 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
635 *ptb
= tb1
->page_next
[n1
];
638 ptb
= &tb1
->page_next
[n1
];
642 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
644 TranslationBlock
*tb1
, **ptb
;
647 ptb
= &tb
->jmp_next
[n
];
650 /* find tb(n) in circular list */
654 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
655 if (n1
== n
&& tb1
== tb
)
658 ptb
= &tb1
->jmp_first
;
660 ptb
= &tb1
->jmp_next
[n1
];
663 /* now we can suppress tb(n) from the list */
664 *ptb
= tb
->jmp_next
[n
];
666 tb
->jmp_next
[n
] = NULL
;
670 /* reset the jump entry 'n' of a TB so that it is not chained to
672 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
674 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
677 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
682 target_phys_addr_t phys_pc
;
683 TranslationBlock
*tb1
, *tb2
;
685 /* remove the TB from the hash list */
686 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
687 h
= tb_phys_hash_func(phys_pc
);
688 tb_remove(&tb_phys_hash
[h
], tb
,
689 offsetof(TranslationBlock
, phys_hash_next
));
691 /* remove the TB from the page list */
692 if (tb
->page_addr
[0] != page_addr
) {
693 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
694 tb_page_remove(&p
->first_tb
, tb
);
695 invalidate_page_bitmap(p
);
697 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
698 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
699 tb_page_remove(&p
->first_tb
, tb
);
700 invalidate_page_bitmap(p
);
703 tb_invalidated_flag
= 1;
705 /* remove the TB from the hash list */
706 h
= tb_jmp_cache_hash_func(tb
->pc
);
707 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
708 if (env
->tb_jmp_cache
[h
] == tb
)
709 env
->tb_jmp_cache
[h
] = NULL
;
712 /* suppress this TB from the two jump lists */
713 tb_jmp_remove(tb
, 0);
714 tb_jmp_remove(tb
, 1);
716 /* suppress any remaining jumps to this TB */
722 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
723 tb2
= tb1
->jmp_next
[n1
];
724 tb_reset_jump(tb1
, n1
);
725 tb1
->jmp_next
[n1
] = NULL
;
728 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
730 tb_phys_invalidate_count
++;
733 static inline void set_bits(uint8_t *tab
, int start
, int len
)
739 mask
= 0xff << (start
& 7);
740 if ((start
& ~7) == (end
& ~7)) {
742 mask
&= ~(0xff << (end
& 7));
747 start
= (start
+ 8) & ~7;
749 while (start
< end1
) {
754 mask
= ~(0xff << (end
& 7));
760 static void build_page_bitmap(PageDesc
*p
)
762 int n
, tb_start
, tb_end
;
763 TranslationBlock
*tb
;
765 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
772 tb
= (TranslationBlock
*)((long)tb
& ~3);
773 /* NOTE: this is subtle as a TB may span two physical pages */
775 /* NOTE: tb_end may be after the end of the page, but
776 it is not a problem */
777 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
778 tb_end
= tb_start
+ tb
->size
;
779 if (tb_end
> TARGET_PAGE_SIZE
)
780 tb_end
= TARGET_PAGE_SIZE
;
783 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
785 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
786 tb
= tb
->page_next
[n
];
790 TranslationBlock
*tb_gen_code(CPUState
*env
,
791 target_ulong pc
, target_ulong cs_base
,
792 int flags
, int cflags
)
794 TranslationBlock
*tb
;
796 target_ulong phys_pc
, phys_page2
, virt_page2
;
799 phys_pc
= get_phys_addr_code(env
, pc
);
802 /* flush must be done */
804 /* cannot fail at this point */
806 /* Don't forget to invalidate previous TB info. */
807 tb_invalidated_flag
= 1;
809 tc_ptr
= code_gen_ptr
;
811 tb
->cs_base
= cs_base
;
814 cpu_gen_code(env
, tb
, &code_gen_size
);
815 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
817 /* check next page if needed */
818 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
820 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
821 phys_page2
= get_phys_addr_code(env
, virt_page2
);
823 tb_link_phys(tb
, phys_pc
, phys_page2
);
827 /* invalidate all TBs which intersect with the target physical page
828 starting in range [start;end[. NOTE: start and end must refer to
829 the same physical page. 'is_cpu_write_access' should be true if called
830 from a real cpu write access: the virtual CPU will exit the current
831 TB if code is modified inside this TB. */
832 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
833 int is_cpu_write_access
)
835 int n
, current_tb_modified
, current_tb_not_found
, current_flags
;
836 CPUState
*env
= cpu_single_env
;
838 TranslationBlock
*tb
, *tb_next
, *current_tb
, *saved_tb
;
839 target_ulong tb_start
, tb_end
;
840 target_ulong current_pc
, current_cs_base
;
842 p
= page_find(start
>> TARGET_PAGE_BITS
);
845 if (!p
->code_bitmap
&&
846 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
847 is_cpu_write_access
) {
848 /* build code bitmap */
849 build_page_bitmap(p
);
852 /* we remove all the TBs in the range [start, end[ */
853 /* XXX: see if in some cases it could be faster to invalidate all the code */
854 current_tb_not_found
= is_cpu_write_access
;
855 current_tb_modified
= 0;
856 current_tb
= NULL
; /* avoid warning */
857 current_pc
= 0; /* avoid warning */
858 current_cs_base
= 0; /* avoid warning */
859 current_flags
= 0; /* avoid warning */
863 tb
= (TranslationBlock
*)((long)tb
& ~3);
864 tb_next
= tb
->page_next
[n
];
865 /* NOTE: this is subtle as a TB may span two physical pages */
867 /* NOTE: tb_end may be after the end of the page, but
868 it is not a problem */
869 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
870 tb_end
= tb_start
+ tb
->size
;
872 tb_start
= tb
->page_addr
[1];
873 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
875 if (!(tb_end
<= start
|| tb_start
>= end
)) {
876 #ifdef TARGET_HAS_PRECISE_SMC
877 if (current_tb_not_found
) {
878 current_tb_not_found
= 0;
880 if (env
->mem_io_pc
) {
881 /* now we have a real cpu fault */
882 current_tb
= tb_find_pc(env
->mem_io_pc
);
885 if (current_tb
== tb
&&
886 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
887 /* If we are modifying the current TB, we must stop
888 its execution. We could be more precise by checking
889 that the modification is after the current PC, but it
890 would require a specialized function to partially
891 restore the CPU state */
893 current_tb_modified
= 1;
894 cpu_restore_state(current_tb
, env
,
895 env
->mem_io_pc
, NULL
);
896 #if defined(TARGET_I386)
897 current_flags
= env
->hflags
;
898 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
899 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
900 current_pc
= current_cs_base
+ env
->eip
;
902 #error unsupported CPU
905 #endif /* TARGET_HAS_PRECISE_SMC */
906 /* we need to do that to handle the case where a signal
907 occurs while doing tb_phys_invalidate() */
910 saved_tb
= env
->current_tb
;
911 env
->current_tb
= NULL
;
913 tb_phys_invalidate(tb
, -1);
915 env
->current_tb
= saved_tb
;
916 if (env
->interrupt_request
&& env
->current_tb
)
917 cpu_interrupt(env
, env
->interrupt_request
);
922 #if !defined(CONFIG_USER_ONLY)
923 /* if no code remaining, no need to continue to use slow writes */
925 invalidate_page_bitmap(p
);
926 if (is_cpu_write_access
) {
927 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
931 #ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_modified
) {
933 /* we generate a block containing just the instruction
934 modifying the memory. It will ensure that it cannot modify
936 env
->current_tb
= NULL
;
937 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
938 cpu_resume_from_signal(env
, NULL
);
943 /* len must be <= 8 and start must be a multiple of len */
944 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
951 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
952 cpu_single_env
->mem_io_vaddr
, len
,
954 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
958 p
= page_find(start
>> TARGET_PAGE_BITS
);
961 if (p
->code_bitmap
) {
962 offset
= start
& ~TARGET_PAGE_MASK
;
963 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
964 if (b
& ((1 << len
) - 1))
968 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
972 #if !defined(CONFIG_SOFTMMU)
973 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
974 unsigned long pc
, void *puc
)
976 int n
, current_flags
, current_tb_modified
;
977 target_ulong current_pc
, current_cs_base
;
979 TranslationBlock
*tb
, *current_tb
;
980 #ifdef TARGET_HAS_PRECISE_SMC
981 CPUState
*env
= cpu_single_env
;
984 addr
&= TARGET_PAGE_MASK
;
985 p
= page_find(addr
>> TARGET_PAGE_BITS
);
989 current_tb_modified
= 0;
991 current_pc
= 0; /* avoid warning */
992 current_cs_base
= 0; /* avoid warning */
993 current_flags
= 0; /* avoid warning */
994 #ifdef TARGET_HAS_PRECISE_SMC
996 current_tb
= tb_find_pc(pc
);
1001 tb
= (TranslationBlock
*)((long)tb
& ~3);
1002 #ifdef TARGET_HAS_PRECISE_SMC
1003 if (current_tb
== tb
&&
1004 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1005 /* If we are modifying the current TB, we must stop
1006 its execution. We could be more precise by checking
1007 that the modification is after the current PC, but it
1008 would require a specialized function to partially
1009 restore the CPU state */
1011 current_tb_modified
= 1;
1012 cpu_restore_state(current_tb
, env
, pc
, puc
);
1013 #if defined(TARGET_I386)
1014 current_flags
= env
->hflags
;
1015 current_flags
|= (env
->eflags
& (IOPL_MASK
| TF_MASK
| VM_MASK
));
1016 current_cs_base
= (target_ulong
)env
->segs
[R_CS
].base
;
1017 current_pc
= current_cs_base
+ env
->eip
;
1019 #error unsupported CPU
1022 #endif /* TARGET_HAS_PRECISE_SMC */
1023 tb_phys_invalidate(tb
, addr
);
1024 tb
= tb
->page_next
[n
];
1027 #ifdef TARGET_HAS_PRECISE_SMC
1028 if (current_tb_modified
) {
1029 /* we generate a block containing just the instruction
1030 modifying the memory. It will ensure that it cannot modify
1032 env
->current_tb
= NULL
;
1033 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1034 cpu_resume_from_signal(env
, puc
);
1040 /* add the tb in the target page and protect it if necessary */
1041 static inline void tb_alloc_page(TranslationBlock
*tb
,
1042 unsigned int n
, target_ulong page_addr
)
1045 TranslationBlock
*last_first_tb
;
1047 tb
->page_addr
[n
] = page_addr
;
1048 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1049 tb
->page_next
[n
] = p
->first_tb
;
1050 last_first_tb
= p
->first_tb
;
1051 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1052 invalidate_page_bitmap(p
);
1054 #if defined(TARGET_HAS_SMC) || 1
1056 #if defined(CONFIG_USER_ONLY)
1057 if (p
->flags
& PAGE_WRITE
) {
1062 /* force the host page as non writable (writes will have a
1063 page fault + mprotect overhead) */
1064 page_addr
&= qemu_host_page_mask
;
1066 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1067 addr
+= TARGET_PAGE_SIZE
) {
1069 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1073 p2
->flags
&= ~PAGE_WRITE
;
1074 page_get_flags(addr
);
1076 mprotect(g2h(page_addr
), qemu_host_page_size
,
1077 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1078 #ifdef DEBUG_TB_INVALIDATE
1079 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1084 /* if some code is already present, then the pages are already
1085 protected. So we handle the case where only the first TB is
1086 allocated in a physical page */
1087 if (!last_first_tb
) {
1088 tlb_protect_code(page_addr
);
1092 #endif /* TARGET_HAS_SMC */
1095 /* Allocate a new translation block. Flush the translation buffer if
1096 too many translation blocks or too much generated code. */
1097 TranslationBlock
*tb_alloc(target_ulong pc
)
1099 TranslationBlock
*tb
;
1101 if (nb_tbs
>= code_gen_max_blocks
||
1102 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1104 tb
= &tbs
[nb_tbs
++];
1110 void tb_free(TranslationBlock
*tb
)
1112 /* In practice this is mostly used for single use temporary TB
1113 Ignore the hard cases and just back up if this TB happens to
1114 be the last one generated. */
1115 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1116 code_gen_ptr
= tb
->tc_ptr
;
1121 /* add a new TB and link it to the physical page tables. phys_page2 is
1122 (-1) to indicate that only one page contains the TB. */
1123 void tb_link_phys(TranslationBlock
*tb
,
1124 target_ulong phys_pc
, target_ulong phys_page2
)
1127 TranslationBlock
**ptb
;
1129 /* Grab the mmap lock to stop another thread invalidating this TB
1130 before we are done. */
1132 /* add in the physical hash table */
1133 h
= tb_phys_hash_func(phys_pc
);
1134 ptb
= &tb_phys_hash
[h
];
1135 tb
->phys_hash_next
= *ptb
;
1138 /* add in the page list */
1139 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1140 if (phys_page2
!= -1)
1141 tb_alloc_page(tb
, 1, phys_page2
);
1143 tb
->page_addr
[1] = -1;
1145 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1146 tb
->jmp_next
[0] = NULL
;
1147 tb
->jmp_next
[1] = NULL
;
1149 /* init original jump addresses */
1150 if (tb
->tb_next_offset
[0] != 0xffff)
1151 tb_reset_jump(tb
, 0);
1152 if (tb
->tb_next_offset
[1] != 0xffff)
1153 tb_reset_jump(tb
, 1);
1155 #ifdef DEBUG_TB_CHECK
1161 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1162 tb[1].tc_ptr. Return NULL if not found */
1163 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1165 int m_min
, m_max
, m
;
1167 TranslationBlock
*tb
;
1171 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1172 tc_ptr
>= (unsigned long)code_gen_ptr
)
1174 /* binary search (cf Knuth) */
1177 while (m_min
<= m_max
) {
1178 m
= (m_min
+ m_max
) >> 1;
1180 v
= (unsigned long)tb
->tc_ptr
;
1183 else if (tc_ptr
< v
) {
1192 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1194 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1196 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1199 tb1
= tb
->jmp_next
[n
];
1201 /* find head of list */
1204 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1207 tb1
= tb1
->jmp_next
[n1
];
1209 /* we are now sure now that tb jumps to tb1 */
1212 /* remove tb from the jmp_first list */
1213 ptb
= &tb_next
->jmp_first
;
1217 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1218 if (n1
== n
&& tb1
== tb
)
1220 ptb
= &tb1
->jmp_next
[n1
];
1222 *ptb
= tb
->jmp_next
[n
];
1223 tb
->jmp_next
[n
] = NULL
;
1225 /* suppress the jump to next tb in generated code */
1226 tb_reset_jump(tb
, n
);
1228 /* suppress jumps in the tb on which we could have jumped */
1229 tb_reset_jump_recursive(tb_next
);
1233 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1235 tb_reset_jump_recursive2(tb
, 0);
1236 tb_reset_jump_recursive2(tb
, 1);
1239 #if defined(TARGET_HAS_ICE)
1240 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1242 target_phys_addr_t addr
;
1244 ram_addr_t ram_addr
;
1247 addr
= cpu_get_phys_page_debug(env
, pc
);
1248 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1250 pd
= IO_MEM_UNASSIGNED
;
1252 pd
= p
->phys_offset
;
1254 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1255 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1259 /* Add a watchpoint. */
1260 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, int type
)
1264 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1265 if (addr
== env
->watchpoint
[i
].vaddr
)
1268 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1271 i
= env
->nb_watchpoints
++;
1272 env
->watchpoint
[i
].vaddr
= addr
;
1273 env
->watchpoint
[i
].type
= type
;
1274 tlb_flush_page(env
, addr
);
1275 /* FIXME: This flush is needed because of the hack to make memory ops
1276 terminate the TB. It can be removed once the proper IO trap and
1277 re-execute bits are in. */
1282 /* Remove a watchpoint. */
1283 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1287 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1288 if (addr
== env
->watchpoint
[i
].vaddr
) {
1289 env
->nb_watchpoints
--;
1290 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1291 tlb_flush_page(env
, addr
);
1298 /* Remove all watchpoints. */
1299 void cpu_watchpoint_remove_all(CPUState
*env
) {
1302 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1303 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1305 env
->nb_watchpoints
= 0;
1308 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1309 breakpoint is reached */
1310 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1312 #if defined(TARGET_HAS_ICE)
1315 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1316 if (env
->breakpoints
[i
] == pc
)
1320 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1322 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1324 breakpoint_invalidate(env
, pc
);
1331 /* remove all breakpoints */
1332 void cpu_breakpoint_remove_all(CPUState
*env
) {
1333 #if defined(TARGET_HAS_ICE)
1335 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1336 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1338 env
->nb_breakpoints
= 0;
1342 /* remove a breakpoint */
1343 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1345 #if defined(TARGET_HAS_ICE)
1347 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1348 if (env
->breakpoints
[i
] == pc
)
1353 env
->nb_breakpoints
--;
1354 if (i
< env
->nb_breakpoints
)
1355 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1357 breakpoint_invalidate(env
, pc
);
1364 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1365 CPU loop after each instruction */
1366 void cpu_single_step(CPUState
*env
, int enabled
)
1368 #if defined(TARGET_HAS_ICE)
1369 if (env
->singlestep_enabled
!= enabled
) {
1370 env
->singlestep_enabled
= enabled
;
1371 /* must flush all the translated code to avoid inconsistancies */
1372 /* XXX: only flush what is necessary */
1378 /* enable or disable low levels log */
1379 void cpu_set_log(int log_flags
)
1381 loglevel
= log_flags
;
1382 if (loglevel
&& !logfile
) {
1383 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1385 perror(logfilename
);
1388 #if !defined(CONFIG_SOFTMMU)
1389 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1391 static uint8_t logfile_buf
[4096];
1392 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1395 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1399 if (!loglevel
&& logfile
) {
1405 void cpu_set_log_filename(const char *filename
)
1407 logfilename
= strdup(filename
);
1412 cpu_set_log(loglevel
);
1415 /* mask must never be zero, except for A20 change call */
1416 void cpu_interrupt(CPUState
*env
, int mask
)
1418 #if !defined(USE_NPTL)
1419 TranslationBlock
*tb
;
1420 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1424 old_mask
= env
->interrupt_request
;
1425 /* FIXME: This is probably not threadsafe. A different thread could
1426 be in the middle of a read-modify-write operation. */
1427 env
->interrupt_request
|= mask
;
1428 #if defined(USE_NPTL)
1429 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1430 problem and hope the cpu will stop of its own accord. For userspace
1431 emulation this often isn't actually as bad as it sounds. Often
1432 signals are used primarily to interrupt blocking syscalls. */
1435 env
->icount_decr
.u16
.high
= 0x8000;
1436 #ifndef CONFIG_USER_ONLY
1437 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1438 an async event happened and we need to process it. */
1440 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1441 cpu_abort(env
, "Raised interrupt while not in I/O function");
1445 tb
= env
->current_tb
;
1446 /* if the cpu is currently executing code, we must unlink it and
1447 all the potentially executing TB */
1448 if (tb
&& !testandset(&interrupt_lock
)) {
1449 env
->current_tb
= NULL
;
1450 tb_reset_jump_recursive(tb
);
1451 resetlock(&interrupt_lock
);
1457 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1459 env
->interrupt_request
&= ~mask
;
1462 CPULogItem cpu_log_items
[] = {
1463 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1464 "show generated host assembly code for each compiled TB" },
1465 { CPU_LOG_TB_IN_ASM
, "in_asm",
1466 "show target assembly code for each compiled TB" },
1467 { CPU_LOG_TB_OP
, "op",
1468 "show micro ops for each compiled TB" },
1469 { CPU_LOG_TB_OP_OPT
, "op_opt",
1472 "before eflags optimization and "
1474 "after liveness analysis" },
1475 { CPU_LOG_INT
, "int",
1476 "show interrupts/exceptions in short format" },
1477 { CPU_LOG_EXEC
, "exec",
1478 "show trace before each executed TB (lots of logs)" },
1479 { CPU_LOG_TB_CPU
, "cpu",
1480 "show CPU state before block translation" },
1482 { CPU_LOG_PCALL
, "pcall",
1483 "show protected mode far calls/returns/exceptions" },
1486 { CPU_LOG_IOPORT
, "ioport",
1487 "show all i/o ports accesses" },
/* Return 1 iff the first n bytes of s1 equal the NUL-terminated
   string s2 exactly (i.e. strlen(s2) == n and the bytes match).
   Cast avoids the signed/unsigned comparison between strlen() and n;
   a negative n can never match. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (n < 0 || strlen(s2) != (size_t)n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1499 /* takes a comma separated list of log masks. Return 0 if error. */
1500 int cpu_str_to_log_mask(const char *str
)
1509 p1
= strchr(p
, ',');
1512 if(cmp1(p
,p1
-p
,"all")) {
1513 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1517 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1518 if (cmp1(p
, p1
- p
, item
->name
))
1532 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1539 fprintf(stderr
, "qemu: fatal: ");
1540 vfprintf(stderr
, fmt
, ap
);
1541 fprintf(stderr
, "\n");
1543 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1545 cpu_dump_state(env
, stderr
, fprintf
, 0);
1548 fprintf(logfile
, "qemu: fatal: ");
1549 vfprintf(logfile
, fmt
, ap2
);
1550 fprintf(logfile
, "\n");
1552 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1554 cpu_dump_state(env
, logfile
, fprintf
, 0);
1564 CPUState
*cpu_copy(CPUState
*env
)
1566 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1567 /* preserve chaining and index */
1568 CPUState
*next_cpu
= new_env
->next_cpu
;
1569 int cpu_index
= new_env
->cpu_index
;
1570 memcpy(new_env
, env
, sizeof(CPUState
));
1571 new_env
->next_cpu
= next_cpu
;
1572 new_env
->cpu_index
= cpu_index
;
1576 #if !defined(CONFIG_USER_ONLY)
1578 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1582 /* Discard jump cache entries for any tb which might potentially
1583 overlap the flushed page. */
1584 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1585 memset (&env
->tb_jmp_cache
[i
], 0,
1586 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1588 i
= tb_jmp_cache_hash_page(addr
);
1589 memset (&env
->tb_jmp_cache
[i
], 0,
1590 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1593 /* NOTE: if flush_global is true, also flush global entries (not
1595 void tlb_flush(CPUState
*env
, int flush_global
)
1599 #if defined(DEBUG_TLB)
1600 printf("tlb_flush:\n");
1602 /* must reset current TB so that interrupts cannot modify the
1603 links while we are modifying them */
1604 env
->current_tb
= NULL
;
1606 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1607 env
->tlb_table
[0][i
].addr_read
= -1;
1608 env
->tlb_table
[0][i
].addr_write
= -1;
1609 env
->tlb_table
[0][i
].addr_code
= -1;
1610 env
->tlb_table
[1][i
].addr_read
= -1;
1611 env
->tlb_table
[1][i
].addr_write
= -1;
1612 env
->tlb_table
[1][i
].addr_code
= -1;
1613 #if (NB_MMU_MODES >= 3)
1614 env
->tlb_table
[2][i
].addr_read
= -1;
1615 env
->tlb_table
[2][i
].addr_write
= -1;
1616 env
->tlb_table
[2][i
].addr_code
= -1;
1617 #if (NB_MMU_MODES == 4)
1618 env
->tlb_table
[3][i
].addr_read
= -1;
1619 env
->tlb_table
[3][i
].addr_write
= -1;
1620 env
->tlb_table
[3][i
].addr_code
= -1;
1625 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1628 if (env
->kqemu_enabled
) {
1629 kqemu_flush(env
, flush_global
);
1635 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1637 if (addr
== (tlb_entry
->addr_read
&
1638 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1639 addr
== (tlb_entry
->addr_write
&
1640 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1641 addr
== (tlb_entry
->addr_code
&
1642 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1643 tlb_entry
->addr_read
= -1;
1644 tlb_entry
->addr_write
= -1;
1645 tlb_entry
->addr_code
= -1;
1649 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1653 #if defined(DEBUG_TLB)
1654 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1656 /* must reset current TB so that interrupts cannot modify the
1657 links while we are modifying them */
1658 env
->current_tb
= NULL
;
1660 addr
&= TARGET_PAGE_MASK
;
1661 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1662 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1663 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1664 #if (NB_MMU_MODES >= 3)
1665 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1666 #if (NB_MMU_MODES == 4)
1667 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1671 tlb_flush_jmp_cache(env
, addr
);
1674 if (env
->kqemu_enabled
) {
1675 kqemu_flush_page(env
, addr
);
1680 /* update the TLBs so that writes to code in the virtual page 'addr'
1682 static void tlb_protect_code(ram_addr_t ram_addr
)
1684 cpu_physical_memory_reset_dirty(ram_addr
,
1685 ram_addr
+ TARGET_PAGE_SIZE
,
1689 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1690 tested for self modifying code */
1691 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1694 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1697 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1698 unsigned long start
, unsigned long length
)
1701 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1702 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1703 if ((addr
- start
) < length
) {
1704 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1709 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1713 unsigned long length
, start1
;
1717 start
&= TARGET_PAGE_MASK
;
1718 end
= TARGET_PAGE_ALIGN(end
);
1720 length
= end
- start
;
1723 len
= length
>> TARGET_PAGE_BITS
;
1725 /* XXX: should not depend on cpu context */
1727 if (env
->kqemu_enabled
) {
1730 for(i
= 0; i
< len
; i
++) {
1731 kqemu_set_notdirty(env
, addr
);
1732 addr
+= TARGET_PAGE_SIZE
;
1736 mask
= ~dirty_flags
;
1737 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1738 for(i
= 0; i
< len
; i
++)
1741 /* we modify the TLB cache so that the dirty bit will be set again
1742 when accessing the range */
1743 start1
= start
+ (unsigned long)phys_ram_base
;
1744 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1745 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1746 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1747 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1748 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1749 #if (NB_MMU_MODES >= 3)
1750 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1751 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1752 #if (NB_MMU_MODES == 4)
1753 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1754 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1760 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1762 ram_addr_t ram_addr
;
1764 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1765 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1766 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1767 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1768 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1773 /* update the TLB according to the current state of the dirty bits */
1774 void cpu_tlb_update_dirty(CPUState
*env
)
1777 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1778 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1779 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1780 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1781 #if (NB_MMU_MODES >= 3)
1782 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1783 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1784 #if (NB_MMU_MODES == 4)
1785 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1786 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1791 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1793 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1794 tlb_entry
->addr_write
= vaddr
;
1797 /* update the TLB corresponding to virtual page vaddr
1798 so that it is no longer dirty */
1799 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1803 vaddr
&= TARGET_PAGE_MASK
;
1804 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1805 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1806 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1807 #if (NB_MMU_MODES >= 3)
1808 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1809 #if (NB_MMU_MODES == 4)
1810 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1815 /* add a new TLB entry. At most one entry for a given virtual address
1816 is permitted. Return 0 if OK or 2 if the page could not be mapped
1817 (can only happen in non SOFTMMU mode for I/O pages or pages
1818 conflicting with the host address space). */
1819 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1820 target_phys_addr_t paddr
, int prot
,
1821 int mmu_idx
, int is_softmmu
)
1826 target_ulong address
;
1827 target_ulong code_address
;
1828 target_phys_addr_t addend
;
1832 target_phys_addr_t iotlb
;
1834 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1836 pd
= IO_MEM_UNASSIGNED
;
1838 pd
= p
->phys_offset
;
1840 #if defined(DEBUG_TLB)
1841 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1842 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1847 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1848 /* IO memory case (romd handled later) */
1849 address
|= TLB_MMIO
;
1851 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1852 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1854 iotlb
= pd
& TARGET_PAGE_MASK
;
1855 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1856 iotlb
|= IO_MEM_NOTDIRTY
;
1858 iotlb
|= IO_MEM_ROM
;
1860 /* IO handlers are currently passed a phsical address.
1861 It would be nice to pass an offset from the base address
1862 of that region. This would avoid having to special case RAM,
1863 and avoid full address decoding in every device.
1864 We can't use the high bits of pd for this because
1865 IO_MEM_ROMD uses these as a ram address. */
1866 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
1869 code_address
= address
;
1870 /* Make accesses to pages with watchpoints go via the
1871 watchpoint trap routines. */
1872 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1873 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1874 iotlb
= io_mem_watch
+ paddr
;
1875 /* TODO: The memory case can be optimized by not trapping
1876 reads of pages with a write breakpoint. */
1877 address
|= TLB_MMIO
;
1881 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1882 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1883 te
= &env
->tlb_table
[mmu_idx
][index
];
1884 te
->addend
= addend
- vaddr
;
1885 if (prot
& PAGE_READ
) {
1886 te
->addr_read
= address
;
1891 if (prot
& PAGE_EXEC
) {
1892 te
->addr_code
= code_address
;
1896 if (prot
& PAGE_WRITE
) {
1897 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1898 (pd
& IO_MEM_ROMD
)) {
1899 /* Write access calls the I/O callback. */
1900 te
->addr_write
= address
| TLB_MMIO
;
1901 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1902 !cpu_physical_memory_is_dirty(pd
)) {
1903 te
->addr_write
= address
| TLB_NOTDIRTY
;
1905 te
->addr_write
= address
;
1908 te
->addr_write
= -1;
1915 void tlb_flush(CPUState
*env
, int flush_global
)
1919 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1923 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1924 target_phys_addr_t paddr
, int prot
,
1925 int mmu_idx
, int is_softmmu
)
1930 /* dump memory mappings */
1931 void page_dump(FILE *f
)
1933 unsigned long start
, end
;
1934 int i
, j
, prot
, prot1
;
1937 fprintf(f
, "%-8s %-8s %-8s %s\n",
1938 "start", "end", "size", "prot");
1942 for(i
= 0; i
<= L1_SIZE
; i
++) {
1947 for(j
= 0;j
< L2_SIZE
; j
++) {
1952 if (prot1
!= prot
) {
1953 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
1955 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
1956 start
, end
, end
- start
,
1957 prot
& PAGE_READ
? 'r' : '-',
1958 prot
& PAGE_WRITE
? 'w' : '-',
1959 prot
& PAGE_EXEC
? 'x' : '-');
1973 int page_get_flags(target_ulong address
)
1977 p
= page_find(address
>> TARGET_PAGE_BITS
);
1983 /* modify the flags of a page and invalidate the code if
1984 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1985 depending on PAGE_WRITE */
1986 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
1991 /* mmap_lock should already be held. */
1992 start
= start
& TARGET_PAGE_MASK
;
1993 end
= TARGET_PAGE_ALIGN(end
);
1994 if (flags
& PAGE_WRITE
)
1995 flags
|= PAGE_WRITE_ORG
;
1996 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
1997 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
1998 /* We may be called for host regions that are outside guest
2002 /* if the write protection is set, then we invalidate the code
2004 if (!(p
->flags
& PAGE_WRITE
) &&
2005 (flags
& PAGE_WRITE
) &&
2007 tb_invalidate_phys_page(addr
, 0, NULL
);
2013 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2019 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2020 start
= start
& TARGET_PAGE_MASK
;
2023 /* we've wrapped around */
2025 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2026 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2029 if( !(p
->flags
& PAGE_VALID
) )
2032 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2034 if (flags
& PAGE_WRITE
) {
2035 if (!(p
->flags
& PAGE_WRITE_ORG
))
2037 /* unprotect the page if it was put read-only because it
2038 contains translated code */
2039 if (!(p
->flags
& PAGE_WRITE
)) {
2040 if (!page_unprotect(addr
, 0, NULL
))
2049 /* called from signal handler: invalidate the code and unprotect the
2050 page. Return TRUE if the fault was succesfully handled. */
2051 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2053 unsigned int page_index
, prot
, pindex
;
2055 target_ulong host_start
, host_end
, addr
;
2057 /* Technically this isn't safe inside a signal handler. However we
2058 know this only ever happens in a synchronous SEGV handler, so in
2059 practice it seems to be ok. */
2062 host_start
= address
& qemu_host_page_mask
;
2063 page_index
= host_start
>> TARGET_PAGE_BITS
;
2064 p1
= page_find(page_index
);
2069 host_end
= host_start
+ qemu_host_page_size
;
2072 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2076 /* if the page was really writable, then we change its
2077 protection back to writable */
2078 if (prot
& PAGE_WRITE_ORG
) {
2079 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2080 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2081 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2082 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2083 p1
[pindex
].flags
|= PAGE_WRITE
;
2084 /* and since the content will be modified, we must invalidate
2085 the corresponding translated code. */
2086 tb_invalidate_phys_page(address
, pc
, puc
);
2087 #ifdef DEBUG_TB_CHECK
2088 tb_invalidate_check(address
);
2098 static inline void tlb_set_dirty(CPUState
*env
,
2099 unsigned long addr
, target_ulong vaddr
)
2102 #endif /* defined(CONFIG_USER_ONLY) */
2104 #if !defined(CONFIG_USER_ONLY)
2105 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2107 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2108 ram_addr_t orig_memory
);
2109 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2112 if (addr > start_addr) \
2115 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2116 if (start_addr2 > 0) \
2120 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2121 end_addr2 = TARGET_PAGE_SIZE - 1; \
2123 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2124 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2129 /* register physical memory. 'size' must be a multiple of the target
2130 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2132 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2134 ram_addr_t phys_offset
)
2136 target_phys_addr_t addr
, end_addr
;
2139 ram_addr_t orig_size
= size
;
2143 /* XXX: should not depend on cpu context */
2145 if (env
->kqemu_enabled
) {
2146 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2149 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2150 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2151 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2152 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2153 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2154 ram_addr_t orig_memory
= p
->phys_offset
;
2155 target_phys_addr_t start_addr2
, end_addr2
;
2156 int need_subpage
= 0;
2158 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2160 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2161 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2162 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2163 &p
->phys_offset
, orig_memory
);
2165 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2168 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2170 p
->phys_offset
= phys_offset
;
2171 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2172 (phys_offset
& IO_MEM_ROMD
))
2173 phys_offset
+= TARGET_PAGE_SIZE
;
2176 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2177 p
->phys_offset
= phys_offset
;
2178 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2179 (phys_offset
& IO_MEM_ROMD
))
2180 phys_offset
+= TARGET_PAGE_SIZE
;
2182 target_phys_addr_t start_addr2
, end_addr2
;
2183 int need_subpage
= 0;
2185 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2186 end_addr2
, need_subpage
);
2188 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2189 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2190 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2191 subpage_register(subpage
, start_addr2
, end_addr2
,
2198 /* since each CPU stores ram addresses in its TLB cache, we must
2199 reset the modified entries */
2201 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2206 /* XXX: temporary until new memory mapping API */
2207 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2211 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2213 return IO_MEM_UNASSIGNED
;
2214 return p
->phys_offset
;
2217 /* XXX: better than nothing */
2218 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2221 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2222 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
"\n",
2223 (uint64_t)size
, (uint64_t)phys_ram_size
);
2226 addr
= phys_ram_alloc_offset
;
2227 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2231 void qemu_ram_free(ram_addr_t addr
)
2235 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2237 #ifdef DEBUG_UNASSIGNED
2238 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2241 do_unassigned_access(addr
, 0, 0, 0);
2243 do_unassigned_access(addr
, 0, 0, 0);
2248 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2250 #ifdef DEBUG_UNASSIGNED
2251 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2254 do_unassigned_access(addr
, 1, 0, 0);
2256 do_unassigned_access(addr
, 1, 0, 0);
2260 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2261 unassigned_mem_readb
,
2262 unassigned_mem_readb
,
2263 unassigned_mem_readb
,
2266 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2267 unassigned_mem_writeb
,
2268 unassigned_mem_writeb
,
2269 unassigned_mem_writeb
,
2272 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2276 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2277 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2278 #if !defined(CONFIG_USER_ONLY)
2279 tb_invalidate_phys_page_fast(ram_addr
, 1);
2280 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2283 stb_p(phys_ram_base
+ ram_addr
, val
);
2285 if (cpu_single_env
->kqemu_enabled
&&
2286 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2287 kqemu_modify_page(cpu_single_env
, ram_addr
);
2289 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2290 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2291 /* we remove the notdirty callback only if the code has been
2293 if (dirty_flags
== 0xff)
2294 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2297 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2301 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2302 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2303 #if !defined(CONFIG_USER_ONLY)
2304 tb_invalidate_phys_page_fast(ram_addr
, 2);
2305 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2308 stw_p(phys_ram_base
+ ram_addr
, val
);
2310 if (cpu_single_env
->kqemu_enabled
&&
2311 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2312 kqemu_modify_page(cpu_single_env
, ram_addr
);
2314 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2315 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2316 /* we remove the notdirty callback only if the code has been
2318 if (dirty_flags
== 0xff)
2319 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2322 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2326 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2327 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2328 #if !defined(CONFIG_USER_ONLY)
2329 tb_invalidate_phys_page_fast(ram_addr
, 4);
2330 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2333 stl_p(phys_ram_base
+ ram_addr
, val
);
2335 if (cpu_single_env
->kqemu_enabled
&&
2336 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2337 kqemu_modify_page(cpu_single_env
, ram_addr
);
2339 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2340 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2341 /* we remove the notdirty callback only if the code has been
2343 if (dirty_flags
== 0xff)
2344 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2347 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2348 NULL
, /* never used */
2349 NULL
, /* never used */
2350 NULL
, /* never used */
2353 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2354 notdirty_mem_writeb
,
2355 notdirty_mem_writew
,
2356 notdirty_mem_writel
,
2359 /* Generate a debug exception if a watchpoint has been hit. */
2360 static void check_watchpoint(int offset
, int flags
)
2362 CPUState
*env
= cpu_single_env
;
2366 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2367 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2368 if (vaddr
== env
->watchpoint
[i
].vaddr
2369 && (env
->watchpoint
[i
].type
& flags
)) {
2370 env
->watchpoint_hit
= i
+ 1;
2371 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2377 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2378 so these check for a hit then pass through to the normal out-of-line
2380 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2382 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2383 return ldub_phys(addr
);
2386 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2388 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2389 return lduw_phys(addr
);
2392 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2394 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2395 return ldl_phys(addr
);
2398 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2401 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2402 stb_phys(addr
, val
);
2405 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2408 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2409 stw_phys(addr
, val
);
2412 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2415 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2416 stl_phys(addr
, val
);
2419 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2425 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2431 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2437 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2438 #if defined(DEBUG_SUBPAGE)
2439 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2440 mmio
, len
, addr
, idx
);
2442 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2447 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2448 uint32_t value
, unsigned int len
)
2452 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2453 #if defined(DEBUG_SUBPAGE)
2454 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2455 mmio
, len
, addr
, idx
, value
);
2457 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2460 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2462 #if defined(DEBUG_SUBPAGE)
2463 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2466 return subpage_readlen(opaque
, addr
, 0);
2469 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2472 #if defined(DEBUG_SUBPAGE)
2473 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2475 subpage_writelen(opaque
, addr
, value
, 0);
2478 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2480 #if defined(DEBUG_SUBPAGE)
2481 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2484 return subpage_readlen(opaque
, addr
, 1);
2487 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2490 #if defined(DEBUG_SUBPAGE)
2491 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2493 subpage_writelen(opaque
, addr
, value
, 1);
2496 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2498 #if defined(DEBUG_SUBPAGE)
2499 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2502 return subpage_readlen(opaque
, addr
, 2);
2505 static void subpage_writel (void *opaque
,
2506 target_phys_addr_t addr
, uint32_t value
)
2508 #if defined(DEBUG_SUBPAGE)
2509 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2511 subpage_writelen(opaque
, addr
, value
, 2);
2514 static CPUReadMemoryFunc
*subpage_read
[] = {
2520 static CPUWriteMemoryFunc
*subpage_write
[] = {
2526 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2532 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2534 idx
= SUBPAGE_IDX(start
);
2535 eidx
= SUBPAGE_IDX(end
);
2536 #if defined(DEBUG_SUBPAGE)
2537 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2538 mmio
, start
, end
, idx
, eidx
, memory
);
2540 memory
>>= IO_MEM_SHIFT
;
2541 for (; idx
<= eidx
; idx
++) {
2542 for (i
= 0; i
< 4; i
++) {
2543 if (io_mem_read
[memory
][i
]) {
2544 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2545 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2547 if (io_mem_write
[memory
][i
]) {
2548 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2549 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2557 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2558 ram_addr_t orig_memory
)
2563 mmio
= qemu_mallocz(sizeof(subpage_t
));
2566 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2567 #if defined(DEBUG_SUBPAGE)
2568 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2569 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2571 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2572 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2578 static void io_mem_init(void)
2580 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2581 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2582 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2585 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2586 watch_mem_write
, NULL
);
2587 /* alloc dirty bits array */
2588 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2589 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2592 /* mem_read and mem_write are arrays of functions containing the
2593 function to access byte (index 0), word (index 1) and dword (index
2594 2). Functions can be omitted with a NULL function pointer. The
2595 registered functions may be modified dynamically later.
2596 If io_index is non zero, the corresponding io zone is
2597 modified. If it is zero, a new io zone is allocated. The return
2598 value can be used with cpu_register_physical_memory(). (-1) is
2599 returned if error. */
2600 int cpu_register_io_memory(int io_index
,
2601 CPUReadMemoryFunc
**mem_read
,
2602 CPUWriteMemoryFunc
**mem_write
,
2605 int i
, subwidth
= 0;
2607 if (io_index
<= 0) {
2608 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2610 io_index
= io_mem_nb
++;
2612 if (io_index
>= IO_MEM_NB_ENTRIES
)
2616 for(i
= 0;i
< 3; i
++) {
2617 if (!mem_read
[i
] || !mem_write
[i
])
2618 subwidth
= IO_MEM_SUBWIDTH
;
2619 io_mem_read
[io_index
][i
] = mem_read
[i
];
2620 io_mem_write
[io_index
][i
] = mem_write
[i
];
2622 io_mem_opaque
[io_index
] = opaque
;
2623 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2626 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2628 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2631 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2633 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2636 #endif /* !defined(CONFIG_USER_ONLY) */
2638 /* physical memory access (slow version, mainly for debug) */
2639 #if defined(CONFIG_USER_ONLY)
2640 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2641 int len
, int is_write
)
2648 page
= addr
& TARGET_PAGE_MASK
;
2649 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2652 flags
= page_get_flags(page
);
2653 if (!(flags
& PAGE_VALID
))
2656 if (!(flags
& PAGE_WRITE
))
2658 /* XXX: this code should not depend on lock_user */
2659 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2660 /* FIXME - should this return an error rather than just fail? */
2663 unlock_user(p
, addr
, l
);
2665 if (!(flags
& PAGE_READ
))
2667 /* XXX: this code should not depend on lock_user */
2668 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2669 /* FIXME - should this return an error rather than just fail? */
2672 unlock_user(p
, addr
, 0);
2681 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2682 int len
, int is_write
)
2687 target_phys_addr_t page
;
2692 page
= addr
& TARGET_PAGE_MASK
;
2693 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2696 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2698 pd
= IO_MEM_UNASSIGNED
;
2700 pd
= p
->phys_offset
;
2704 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2705 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2706 /* XXX: could force cpu_single_env to NULL to avoid
2708 if (l
>= 4 && ((addr
& 3) == 0)) {
2709 /* 32 bit write access */
2711 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2713 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2714 /* 16 bit write access */
2716 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2719 /* 8 bit write access */
2721 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2725 unsigned long addr1
;
2726 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2728 ptr
= phys_ram_base
+ addr1
;
2729 memcpy(ptr
, buf
, l
);
2730 if (!cpu_physical_memory_is_dirty(addr1
)) {
2731 /* invalidate code */
2732 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2734 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2735 (0xff & ~CODE_DIRTY_FLAG
);
2739 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2740 !(pd
& IO_MEM_ROMD
)) {
2742 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2743 if (l
>= 4 && ((addr
& 3) == 0)) {
2744 /* 32 bit read access */
2745 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2748 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2749 /* 16 bit read access */
2750 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2754 /* 8 bit read access */
2755 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2761 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2762 (addr
& ~TARGET_PAGE_MASK
);
2763 memcpy(buf
, ptr
, l
);
2772 /* used for ROM loading : can write in RAM and ROM */
2773 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2774 const uint8_t *buf
, int len
)
2778 target_phys_addr_t page
;
2783 page
= addr
& TARGET_PAGE_MASK
;
2784 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2787 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2789 pd
= IO_MEM_UNASSIGNED
;
2791 pd
= p
->phys_offset
;
2794 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2795 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2796 !(pd
& IO_MEM_ROMD
)) {
2799 unsigned long addr1
;
2800 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2802 ptr
= phys_ram_base
+ addr1
;
2803 memcpy(ptr
, buf
, l
);
2812 /* warning: addr must be aligned */
2813 uint32_t ldl_phys(target_phys_addr_t addr
)
2821 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2823 pd
= IO_MEM_UNASSIGNED
;
2825 pd
= p
->phys_offset
;
2828 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2829 !(pd
& IO_MEM_ROMD
)) {
2831 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2832 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2835 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2836 (addr
& ~TARGET_PAGE_MASK
);
2842 /* warning: addr must be aligned */
2843 uint64_t ldq_phys(target_phys_addr_t addr
)
2851 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2853 pd
= IO_MEM_UNASSIGNED
;
2855 pd
= p
->phys_offset
;
2858 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2859 !(pd
& IO_MEM_ROMD
)) {
2861 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2862 #ifdef TARGET_WORDS_BIGENDIAN
2863 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2864 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2866 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2867 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2871 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2872 (addr
& ~TARGET_PAGE_MASK
);
2879 uint32_t ldub_phys(target_phys_addr_t addr
)
2882 cpu_physical_memory_read(addr
, &val
, 1);
2887 uint32_t lduw_phys(target_phys_addr_t addr
)
2890 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2891 return tswap16(val
);
2894 /* warning: addr must be aligned. The ram page is not masked as dirty
2895 and the code inside is not invalidated. It is useful if the dirty
2896 bits are used to track modified PTEs */
2897 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2904 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2906 pd
= IO_MEM_UNASSIGNED
;
2908 pd
= p
->phys_offset
;
2911 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2912 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2913 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2915 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2916 (addr
& ~TARGET_PAGE_MASK
);
2921 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
2928 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2930 pd
= IO_MEM_UNASSIGNED
;
2932 pd
= p
->phys_offset
;
2935 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2936 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2937 #ifdef TARGET_WORDS_BIGENDIAN
2938 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
2939 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
2941 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2942 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
2945 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2946 (addr
& ~TARGET_PAGE_MASK
);
2951 /* warning: addr must be aligned */
2952 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
2959 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2961 pd
= IO_MEM_UNASSIGNED
;
2963 pd
= p
->phys_offset
;
2966 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2967 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2968 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2970 unsigned long addr1
;
2971 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2973 ptr
= phys_ram_base
+ addr1
;
2975 if (!cpu_physical_memory_is_dirty(addr1
)) {
2976 /* invalidate code */
2977 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
2979 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2980 (0xff & ~CODE_DIRTY_FLAG
);
2986 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
2989 cpu_physical_memory_write(addr
, &v
, 1);
2993 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
2995 uint16_t v
= tswap16(val
);
2996 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3000 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3003 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3008 /* virtual memory access for debug */
3009 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3010 uint8_t *buf
, int len
, int is_write
)
3013 target_phys_addr_t phys_addr
;
3017 page
= addr
& TARGET_PAGE_MASK
;
3018 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3019 /* if no physical page mapped, return an error */
3020 if (phys_addr
== -1)
3022 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3025 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3034 /* in deterministic execution mode, instructions doing device I/Os
3035 must be at the end of the TB */
3036 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3038 TranslationBlock
*tb
;
3040 target_ulong pc
, cs_base
;
3043 tb
= tb_find_pc((unsigned long)retaddr
);
3045 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3048 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3049 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3050 /* Calculate how many instructions had been executed before the fault
3052 n
= n
- env
->icount_decr
.u16
.low
;
3053 /* Generate a new TB ending on the I/O insn. */
3055 /* On MIPS and SH, delay slot instructions can only be restarted if
3056 they were already the first instruction in the TB. If this is not
3057 the first instruction in a TB then re-execute the preceding
3059 #if defined(TARGET_MIPS)
3060 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3061 env
->active_tc
.PC
-= 4;
3062 env
->icount_decr
.u16
.low
++;
3063 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3065 #elif defined(TARGET_SH4)
3066 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3069 env
->icount_decr
.u16
.low
++;
3070 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3073 /* This should never happen. */
3074 if (n
> CF_COUNT_MASK
)
3075 cpu_abort(env
, "TB too big during recompile");
3077 cflags
= n
| CF_LAST_IO
;
3079 cs_base
= tb
->cs_base
;
3081 tb_phys_invalidate(tb
, -1);
3082 /* FIXME: In theory this could raise an exception. In practice
3083 we have already translated the block once so it's probably ok. */
3084 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3085 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3086 the first in the TB) then we end up generating a whole new TB and
3087 repeating the fault, which is horribly inefficient.
3088 Better would be to execute just this insn uncached, or generate a
3090 cpu_resume_from_signal(env
, NULL
);
3093 void dump_exec_info(FILE *f
,
3094 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3096 int i
, target_code_size
, max_target_code_size
;
3097 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3098 TranslationBlock
*tb
;
3100 target_code_size
= 0;
3101 max_target_code_size
= 0;
3103 direct_jmp_count
= 0;
3104 direct_jmp2_count
= 0;
3105 for(i
= 0; i
< nb_tbs
; i
++) {
3107 target_code_size
+= tb
->size
;
3108 if (tb
->size
> max_target_code_size
)
3109 max_target_code_size
= tb
->size
;
3110 if (tb
->page_addr
[1] != -1)
3112 if (tb
->tb_next_offset
[0] != 0xffff) {
3114 if (tb
->tb_next_offset
[1] != 0xffff) {
3115 direct_jmp2_count
++;
3119 /* XXX: avoid using doubles ? */
3120 cpu_fprintf(f
, "Translation buffer state:\n");
3121 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3122 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3123 cpu_fprintf(f
, "TB count %d/%d\n",
3124 nb_tbs
, code_gen_max_blocks
);
3125 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3126 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3127 max_target_code_size
);
3128 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3129 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3130 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3131 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3133 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3134 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3136 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3138 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3139 cpu_fprintf(f
, "\nStatistics:\n");
3140 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3141 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3142 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3143 tcg_dump_info(f
, cpu_fprintf
);
3146 #if !defined(CONFIG_USER_ONLY)
3148 #define MMUSUFFIX _cmmu
3149 #define GETPC() NULL
3150 #define env cpu_single_env
3151 #define SOFTMMU_CODE_ACCESS
3154 #include "softmmu_template.h"
3157 #include "softmmu_template.h"
3160 #include "softmmu_template.h"
3163 #include "softmmu_template.h"