2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
152 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153 /* XXX: this is a temporary hack for alpha target.
154 * In the future, this is to be replaced by a multi-level table
155 * to actually be able to handle the complete 64 bits address space.
157 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
162 #define L1_SIZE (1 << L1_BITS)
163 #define L2_SIZE (1 << L2_BITS)
165 unsigned long qemu_real_host_page_size
;
166 unsigned long qemu_host_page_bits
;
167 unsigned long qemu_host_page_size
;
168 unsigned long qemu_host_page_mask
;
170 /* XXX: for system emulation, it could just be an array */
171 static PageDesc
*l1_map
[L1_SIZE
];
172 static PhysPageDesc
**l1_phys_map
;
174 #if !defined(CONFIG_USER_ONLY)
175 static void io_mem_init(void);
177 /* io memory support */
178 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
179 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
180 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
181 static int io_mem_nb
;
182 static int io_mem_watch
;
186 static const char *logfilename
= "/tmp/qemu.log";
189 static int log_append
= 0;
192 static int tlb_flush_count
;
193 static int tb_flush_count
;
194 static int tb_phys_invalidate_count
;
196 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197 typedef struct subpage_t
{
198 target_phys_addr_t base
;
199 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
200 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
201 void *opaque
[TARGET_PAGE_SIZE
][2][4];
205 static void map_exec(void *addr
, long size
)
208 VirtualProtect(addr
, size
,
209 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Make [addr, addr+size) executable (POSIX variant).
   mprotect() requires a page-aligned start address, so the range is
   widened outward to page boundaries: 'start' is rounded down and
   'end' is rounded up.  The mprotect return value is deliberately
   ignored (best effort), matching the original code. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    /* round start down to the containing page */
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    /* round end up to the next page boundary */
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
230 static void page_init(void)
232 /* NOTE: we can always suppose that qemu_host_page_size >=
236 SYSTEM_INFO system_info
;
238 GetSystemInfo(&system_info
);
239 qemu_real_host_page_size
= system_info
.dwPageSize
;
242 qemu_real_host_page_size
= getpagesize();
244 if (qemu_host_page_size
== 0)
245 qemu_host_page_size
= qemu_real_host_page_size
;
246 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
247 qemu_host_page_size
= TARGET_PAGE_SIZE
;
248 qemu_host_page_bits
= 0;
249 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
250 qemu_host_page_bits
++;
251 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
252 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
253 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
255 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
257 long long startaddr
, endaddr
;
262 last_brk
= (unsigned long)sbrk(0);
263 f
= fopen("/proc/self/maps", "r");
266 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
268 startaddr
= MIN(startaddr
,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
270 endaddr
= MIN(endaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
273 TARGET_PAGE_ALIGN(endaddr
),
284 static inline PageDesc
**page_l1_map(target_ulong index
)
286 #if TARGET_LONG_BITS > 32
287 /* Host memory outside guest VM. For 32-bit targets we have already
288 excluded high addresses. */
289 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
292 return &l1_map
[index
>> L2_BITS
];
295 static inline PageDesc
*page_find_alloc(target_ulong index
)
298 lp
= page_l1_map(index
);
304 /* allocate if not found */
305 #if defined(CONFIG_USER_ONLY)
307 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
308 /* Don't use qemu_malloc because it may recurse. */
309 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
310 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
313 if (addr
== (target_ulong
)addr
) {
314 page_set_flags(addr
& TARGET_PAGE_MASK
,
315 TARGET_PAGE_ALIGN(addr
+ len
),
319 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
323 return p
+ (index
& (L2_SIZE
- 1));
326 static inline PageDesc
*page_find(target_ulong index
)
329 lp
= page_l1_map(index
);
336 return p
+ (index
& (L2_SIZE
- 1));
339 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
344 p
= (void **)l1_phys_map
;
345 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
347 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
353 /* allocate if not found */
356 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
357 memset(p
, 0, sizeof(void *) * L1_SIZE
);
361 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
365 /* allocate if not found */
368 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
370 for (i
= 0; i
< L2_SIZE
; i
++)
371 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
373 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
376 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
378 return phys_page_find_alloc(index
, 0);
381 #if !defined(CONFIG_USER_ONLY)
382 static void tlb_protect_code(ram_addr_t ram_addr
);
383 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
385 #define mmap_lock() do { } while(0)
386 #define mmap_unlock() do { } while(0)
389 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391 #if defined(CONFIG_USER_ONLY)
392 /* Currently it is not recommanded to allocate big chunks of data in
393 user mode. It will change when a dedicated libc will be used */
394 #define USE_STATIC_CODE_GEN_BUFFER
397 #ifdef USE_STATIC_CODE_GEN_BUFFER
398 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
401 static void code_gen_alloc(unsigned long tb_size
)
403 #ifdef USE_STATIC_CODE_GEN_BUFFER
404 code_gen_buffer
= static_code_gen_buffer
;
405 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
406 map_exec(code_gen_buffer
, code_gen_buffer_size
);
408 code_gen_buffer_size
= tb_size
;
409 if (code_gen_buffer_size
== 0) {
410 #if defined(CONFIG_USER_ONLY)
411 /* in user mode, phys_ram_size is not meaningful */
412 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
414 /* XXX: needs ajustments */
415 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
418 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
419 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
420 /* The code gen buffer location may have constraints depending on
421 the host cpu and OS */
422 #if defined(__linux__)
427 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
428 #if defined(__x86_64__)
430 /* Cannot map more than that */
431 if (code_gen_buffer_size
> (800 * 1024 * 1024))
432 code_gen_buffer_size
= (800 * 1024 * 1024);
433 #elif defined(__sparc_v9__)
434 // Map the buffer below 2G, so we can use direct calls and branches
436 start
= (void *) 0x60000000UL
;
437 if (code_gen_buffer_size
> (512 * 1024 * 1024))
438 code_gen_buffer_size
= (512 * 1024 * 1024);
440 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
441 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
443 if (code_gen_buffer
== MAP_FAILED
) {
444 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
448 #elif defined(__FreeBSD__)
452 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
453 #if defined(__x86_64__)
454 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455 * 0x40000000 is free */
457 addr
= (void *)0x40000000;
458 /* Cannot map more than that */
459 if (code_gen_buffer_size
> (800 * 1024 * 1024))
460 code_gen_buffer_size
= (800 * 1024 * 1024);
462 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
463 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
465 if (code_gen_buffer
== MAP_FAILED
) {
466 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
471 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
472 if (!code_gen_buffer
) {
473 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
476 map_exec(code_gen_buffer
, code_gen_buffer_size
);
478 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
479 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
480 code_gen_buffer_max_size
= code_gen_buffer_size
-
481 code_gen_max_block_size();
482 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
483 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
486 /* Must be called before using the QEMU cpus. 'tb_size' is the size
487 (in bytes) allocated to the translation buffer. Zero means default
489 void cpu_exec_init_all(unsigned long tb_size
)
492 code_gen_alloc(tb_size
);
493 code_gen_ptr
= code_gen_buffer
;
495 #if !defined(CONFIG_USER_ONLY)
500 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502 #define CPU_COMMON_SAVE_VERSION 1
504 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
506 CPUState
*env
= opaque
;
508 qemu_put_be32s(f
, &env
->halted
);
509 qemu_put_be32s(f
, &env
->interrupt_request
);
512 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
514 CPUState
*env
= opaque
;
516 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
519 qemu_get_be32s(f
, &env
->halted
);
520 qemu_get_be32s(f
, &env
->interrupt_request
);
527 void cpu_exec_init(CPUState
*env
)
532 env
->next_cpu
= NULL
;
535 while (*penv
!= NULL
) {
536 penv
= (CPUState
**)&(*penv
)->next_cpu
;
539 env
->cpu_index
= cpu_index
;
540 env
->nb_watchpoints
= 0;
542 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
543 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
544 cpu_common_save
, cpu_common_load
, env
);
545 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
546 cpu_save
, cpu_load
, env
);
550 static inline void invalidate_page_bitmap(PageDesc
*p
)
552 if (p
->code_bitmap
) {
553 qemu_free(p
->code_bitmap
);
554 p
->code_bitmap
= NULL
;
556 p
->code_write_count
= 0;
559 /* set to NULL all the 'first_tb' fields in all PageDescs */
560 static void page_flush_tb(void)
565 for(i
= 0; i
< L1_SIZE
; i
++) {
568 for(j
= 0; j
< L2_SIZE
; j
++) {
570 invalidate_page_bitmap(p
);
577 /* flush all the translation blocks */
578 /* XXX: tb_flush is currently not thread safe */
579 void tb_flush(CPUState
*env1
)
582 #if defined(DEBUG_FLUSH)
583 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
584 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
586 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
588 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
589 cpu_abort(env1
, "Internal error: code buffer overflow\n");
593 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
594 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
597 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
600 code_gen_ptr
= code_gen_buffer
;
601 /* XXX: flush processor icache at this point if cache flush is
606 #ifdef DEBUG_TB_CHECK
608 static void tb_invalidate_check(target_ulong address
)
610 TranslationBlock
*tb
;
612 address
&= TARGET_PAGE_MASK
;
613 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
614 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
615 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
616 address
>= tb
->pc
+ tb
->size
)) {
617 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
618 address
, (long)tb
->pc
, tb
->size
);
624 /* verify that all the pages have correct rights for code */
625 static void tb_page_check(void)
627 TranslationBlock
*tb
;
628 int i
, flags1
, flags2
;
630 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
631 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
632 flags1
= page_get_flags(tb
->pc
);
633 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
634 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
635 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
636 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
642 static void tb_jmp_check(TranslationBlock
*tb
)
644 TranslationBlock
*tb1
;
647 /* suppress any remaining jumps to this TB */
651 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
654 tb1
= tb1
->jmp_next
[n1
];
656 /* check end of list */
658 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb
);
664 /* invalidate one TB */
665 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
668 TranslationBlock
*tb1
;
672 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
675 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
679 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
681 TranslationBlock
*tb1
;
687 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
689 *ptb
= tb1
->page_next
[n1
];
692 ptb
= &tb1
->page_next
[n1
];
696 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
698 TranslationBlock
*tb1
, **ptb
;
701 ptb
= &tb
->jmp_next
[n
];
704 /* find tb(n) in circular list */
708 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
709 if (n1
== n
&& tb1
== tb
)
712 ptb
= &tb1
->jmp_first
;
714 ptb
= &tb1
->jmp_next
[n1
];
717 /* now we can suppress tb(n) from the list */
718 *ptb
= tb
->jmp_next
[n
];
720 tb
->jmp_next
[n
] = NULL
;
724 /* reset the jump entry 'n' of a TB so that it is not chained to
726 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
728 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
731 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
736 target_phys_addr_t phys_pc
;
737 TranslationBlock
*tb1
, *tb2
;
739 /* remove the TB from the hash list */
740 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
741 h
= tb_phys_hash_func(phys_pc
);
742 tb_remove(&tb_phys_hash
[h
], tb
,
743 offsetof(TranslationBlock
, phys_hash_next
));
745 /* remove the TB from the page list */
746 if (tb
->page_addr
[0] != page_addr
) {
747 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
748 tb_page_remove(&p
->first_tb
, tb
);
749 invalidate_page_bitmap(p
);
751 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
752 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
753 tb_page_remove(&p
->first_tb
, tb
);
754 invalidate_page_bitmap(p
);
757 tb_invalidated_flag
= 1;
759 /* remove the TB from the hash list */
760 h
= tb_jmp_cache_hash_func(tb
->pc
);
761 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
762 if (env
->tb_jmp_cache
[h
] == tb
)
763 env
->tb_jmp_cache
[h
] = NULL
;
766 /* suppress this TB from the two jump lists */
767 tb_jmp_remove(tb
, 0);
768 tb_jmp_remove(tb
, 1);
770 /* suppress any remaining jumps to this TB */
776 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
777 tb2
= tb1
->jmp_next
[n1
];
778 tb_reset_jump(tb1
, n1
);
779 tb1
->jmp_next
[n1
] = NULL
;
782 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
784 tb_phys_invalidate_count
++;
/* Set 'len' consecutive bits starting at bit index 'start' in the bit
   array 'tab' (bit i lives in tab[i>>3], LSB-first within each byte).
   Handles the three cases: range inside one byte, leading partial byte,
   run of full 0xff bytes, and trailing partial byte.
   NOTE(review): several interior lines were dropped by the extraction;
   restored around the visible mask computations. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* whole range lies within one byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        /* leading partial byte */
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        /* full bytes in the middle */
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        /* trailing partial byte */
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
814 static void build_page_bitmap(PageDesc
*p
)
816 int n
, tb_start
, tb_end
;
817 TranslationBlock
*tb
;
819 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
826 tb
= (TranslationBlock
*)((long)tb
& ~3);
827 /* NOTE: this is subtle as a TB may span two physical pages */
829 /* NOTE: tb_end may be after the end of the page, but
830 it is not a problem */
831 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
832 tb_end
= tb_start
+ tb
->size
;
833 if (tb_end
> TARGET_PAGE_SIZE
)
834 tb_end
= TARGET_PAGE_SIZE
;
837 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
839 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
840 tb
= tb
->page_next
[n
];
844 TranslationBlock
*tb_gen_code(CPUState
*env
,
845 target_ulong pc
, target_ulong cs_base
,
846 int flags
, int cflags
)
848 TranslationBlock
*tb
;
850 target_ulong phys_pc
, phys_page2
, virt_page2
;
853 phys_pc
= get_phys_addr_code(env
, pc
);
856 /* flush must be done */
858 /* cannot fail at this point */
860 /* Don't forget to invalidate previous TB info. */
861 tb_invalidated_flag
= 1;
863 tc_ptr
= code_gen_ptr
;
865 tb
->cs_base
= cs_base
;
868 cpu_gen_code(env
, tb
, &code_gen_size
);
869 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
871 /* check next page if needed */
872 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
874 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
875 phys_page2
= get_phys_addr_code(env
, virt_page2
);
877 tb_link_phys(tb
, phys_pc
, phys_page2
);
881 /* invalidate all TBs which intersect with the target physical page
882 starting in range [start;end[. NOTE: start and end must refer to
883 the same physical page. 'is_cpu_write_access' should be true if called
884 from a real cpu write access: the virtual CPU will exit the current
885 TB if code is modified inside this TB. */
886 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
887 int is_cpu_write_access
)
889 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
890 CPUState
*env
= cpu_single_env
;
891 target_ulong tb_start
, tb_end
;
894 #ifdef TARGET_HAS_PRECISE_SMC
895 int current_tb_not_found
= is_cpu_write_access
;
896 TranslationBlock
*current_tb
= NULL
;
897 int current_tb_modified
= 0;
898 target_ulong current_pc
= 0;
899 target_ulong current_cs_base
= 0;
900 int current_flags
= 0;
901 #endif /* TARGET_HAS_PRECISE_SMC */
903 p
= page_find(start
>> TARGET_PAGE_BITS
);
906 if (!p
->code_bitmap
&&
907 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
908 is_cpu_write_access
) {
909 /* build code bitmap */
910 build_page_bitmap(p
);
913 /* we remove all the TBs in the range [start, end[ */
914 /* XXX: see if in some cases it could be faster to invalidate all the code */
918 tb
= (TranslationBlock
*)((long)tb
& ~3);
919 tb_next
= tb
->page_next
[n
];
920 /* NOTE: this is subtle as a TB may span two physical pages */
922 /* NOTE: tb_end may be after the end of the page, but
923 it is not a problem */
924 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
925 tb_end
= tb_start
+ tb
->size
;
927 tb_start
= tb
->page_addr
[1];
928 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
930 if (!(tb_end
<= start
|| tb_start
>= end
)) {
931 #ifdef TARGET_HAS_PRECISE_SMC
932 if (current_tb_not_found
) {
933 current_tb_not_found
= 0;
935 if (env
->mem_io_pc
) {
936 /* now we have a real cpu fault */
937 current_tb
= tb_find_pc(env
->mem_io_pc
);
940 if (current_tb
== tb
&&
941 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
942 /* If we are modifying the current TB, we must stop
943 its execution. We could be more precise by checking
944 that the modification is after the current PC, but it
945 would require a specialized function to partially
946 restore the CPU state */
948 current_tb_modified
= 1;
949 cpu_restore_state(current_tb
, env
,
950 env
->mem_io_pc
, NULL
);
951 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
954 #endif /* TARGET_HAS_PRECISE_SMC */
955 /* we need to do that to handle the case where a signal
956 occurs while doing tb_phys_invalidate() */
959 saved_tb
= env
->current_tb
;
960 env
->current_tb
= NULL
;
962 tb_phys_invalidate(tb
, -1);
964 env
->current_tb
= saved_tb
;
965 if (env
->interrupt_request
&& env
->current_tb
)
966 cpu_interrupt(env
, env
->interrupt_request
);
971 #if !defined(CONFIG_USER_ONLY)
972 /* if no code remaining, no need to continue to use slow writes */
974 invalidate_page_bitmap(p
);
975 if (is_cpu_write_access
) {
976 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
980 #ifdef TARGET_HAS_PRECISE_SMC
981 if (current_tb_modified
) {
982 /* we generate a block containing just the instruction
983 modifying the memory. It will ensure that it cannot modify
985 env
->current_tb
= NULL
;
986 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
987 cpu_resume_from_signal(env
, NULL
);
992 /* len must be <= 8 and start must be a multiple of len */
993 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1000 fprintf(logfile
, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1001 cpu_single_env
->mem_io_vaddr
, len
,
1002 cpu_single_env
->eip
,
1003 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1007 p
= page_find(start
>> TARGET_PAGE_BITS
);
1010 if (p
->code_bitmap
) {
1011 offset
= start
& ~TARGET_PAGE_MASK
;
1012 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1013 if (b
& ((1 << len
) - 1))
1017 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1021 #if !defined(CONFIG_SOFTMMU)
1022 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1023 unsigned long pc
, void *puc
)
1025 TranslationBlock
*tb
;
1028 #ifdef TARGET_HAS_PRECISE_SMC
1029 TranslationBlock
*current_tb
= NULL
;
1030 CPUState
*env
= cpu_single_env
;
1031 int current_tb_modified
= 0;
1032 target_ulong current_pc
= 0;
1033 target_ulong current_cs_base
= 0;
1034 int current_flags
= 0;
1037 addr
&= TARGET_PAGE_MASK
;
1038 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1042 #ifdef TARGET_HAS_PRECISE_SMC
1043 if (tb
&& pc
!= 0) {
1044 current_tb
= tb_find_pc(pc
);
1047 while (tb
!= NULL
) {
1049 tb
= (TranslationBlock
*)((long)tb
& ~3);
1050 #ifdef TARGET_HAS_PRECISE_SMC
1051 if (current_tb
== tb
&&
1052 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1053 /* If we are modifying the current TB, we must stop
1054 its execution. We could be more precise by checking
1055 that the modification is after the current PC, but it
1056 would require a specialized function to partially
1057 restore the CPU state */
1059 current_tb_modified
= 1;
1060 cpu_restore_state(current_tb
, env
, pc
, puc
);
1061 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1064 #endif /* TARGET_HAS_PRECISE_SMC */
1065 tb_phys_invalidate(tb
, addr
);
1066 tb
= tb
->page_next
[n
];
1069 #ifdef TARGET_HAS_PRECISE_SMC
1070 if (current_tb_modified
) {
1071 /* we generate a block containing just the instruction
1072 modifying the memory. It will ensure that it cannot modify
1074 env
->current_tb
= NULL
;
1075 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1076 cpu_resume_from_signal(env
, puc
);
1082 /* add the tb in the target page and protect it if necessary */
1083 static inline void tb_alloc_page(TranslationBlock
*tb
,
1084 unsigned int n
, target_ulong page_addr
)
1087 TranslationBlock
*last_first_tb
;
1089 tb
->page_addr
[n
] = page_addr
;
1090 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1091 tb
->page_next
[n
] = p
->first_tb
;
1092 last_first_tb
= p
->first_tb
;
1093 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1094 invalidate_page_bitmap(p
);
1096 #if defined(TARGET_HAS_SMC) || 1
1098 #if defined(CONFIG_USER_ONLY)
1099 if (p
->flags
& PAGE_WRITE
) {
1104 /* force the host page as non writable (writes will have a
1105 page fault + mprotect overhead) */
1106 page_addr
&= qemu_host_page_mask
;
1108 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1109 addr
+= TARGET_PAGE_SIZE
) {
1111 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1115 p2
->flags
&= ~PAGE_WRITE
;
1116 page_get_flags(addr
);
1118 mprotect(g2h(page_addr
), qemu_host_page_size
,
1119 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1120 #ifdef DEBUG_TB_INVALIDATE
1121 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1126 /* if some code is already present, then the pages are already
1127 protected. So we handle the case where only the first TB is
1128 allocated in a physical page */
1129 if (!last_first_tb
) {
1130 tlb_protect_code(page_addr
);
1134 #endif /* TARGET_HAS_SMC */
1137 /* Allocate a new translation block. Flush the translation buffer if
1138 too many translation blocks or too much generated code. */
1139 TranslationBlock
*tb_alloc(target_ulong pc
)
1141 TranslationBlock
*tb
;
1143 if (nb_tbs
>= code_gen_max_blocks
||
1144 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1146 tb
= &tbs
[nb_tbs
++];
1152 void tb_free(TranslationBlock
*tb
)
1154 /* In practice this is mostly used for single use temporary TB
1155 Ignore the hard cases and just back up if this TB happens to
1156 be the last one generated. */
1157 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1158 code_gen_ptr
= tb
->tc_ptr
;
1163 /* add a new TB and link it to the physical page tables. phys_page2 is
1164 (-1) to indicate that only one page contains the TB. */
1165 void tb_link_phys(TranslationBlock
*tb
,
1166 target_ulong phys_pc
, target_ulong phys_page2
)
1169 TranslationBlock
**ptb
;
1171 /* Grab the mmap lock to stop another thread invalidating this TB
1172 before we are done. */
1174 /* add in the physical hash table */
1175 h
= tb_phys_hash_func(phys_pc
);
1176 ptb
= &tb_phys_hash
[h
];
1177 tb
->phys_hash_next
= *ptb
;
1180 /* add in the page list */
1181 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1182 if (phys_page2
!= -1)
1183 tb_alloc_page(tb
, 1, phys_page2
);
1185 tb
->page_addr
[1] = -1;
1187 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1188 tb
->jmp_next
[0] = NULL
;
1189 tb
->jmp_next
[1] = NULL
;
1191 /* init original jump addresses */
1192 if (tb
->tb_next_offset
[0] != 0xffff)
1193 tb_reset_jump(tb
, 0);
1194 if (tb
->tb_next_offset
[1] != 0xffff)
1195 tb_reset_jump(tb
, 1);
1197 #ifdef DEBUG_TB_CHECK
1203 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204 tb[1].tc_ptr. Return NULL if not found */
1205 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1207 int m_min
, m_max
, m
;
1209 TranslationBlock
*tb
;
1213 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1214 tc_ptr
>= (unsigned long)code_gen_ptr
)
1216 /* binary search (cf Knuth) */
1219 while (m_min
<= m_max
) {
1220 m
= (m_min
+ m_max
) >> 1;
1222 v
= (unsigned long)tb
->tc_ptr
;
1225 else if (tc_ptr
< v
) {
1234 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1236 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1238 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1241 tb1
= tb
->jmp_next
[n
];
1243 /* find head of list */
1246 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1249 tb1
= tb1
->jmp_next
[n1
];
1251 /* we are now sure now that tb jumps to tb1 */
1254 /* remove tb from the jmp_first list */
1255 ptb
= &tb_next
->jmp_first
;
1259 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1260 if (n1
== n
&& tb1
== tb
)
1262 ptb
= &tb1
->jmp_next
[n1
];
1264 *ptb
= tb
->jmp_next
[n
];
1265 tb
->jmp_next
[n
] = NULL
;
1267 /* suppress the jump to next tb in generated code */
1268 tb_reset_jump(tb
, n
);
1270 /* suppress jumps in the tb on which we could have jumped */
1271 tb_reset_jump_recursive(tb_next
);
1275 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1277 tb_reset_jump_recursive2(tb
, 0);
1278 tb_reset_jump_recursive2(tb
, 1);
1281 #if defined(TARGET_HAS_ICE)
1282 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1284 target_phys_addr_t addr
;
1286 ram_addr_t ram_addr
;
1289 addr
= cpu_get_phys_page_debug(env
, pc
);
1290 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1292 pd
= IO_MEM_UNASSIGNED
;
1294 pd
= p
->phys_offset
;
1296 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1297 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1301 /* Add a watchpoint. */
1302 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, int type
)
1306 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1307 if (addr
== env
->watchpoint
[i
].vaddr
)
1310 if (env
->nb_watchpoints
>= MAX_WATCHPOINTS
)
1313 i
= env
->nb_watchpoints
++;
1314 env
->watchpoint
[i
].vaddr
= addr
;
1315 env
->watchpoint
[i
].type
= type
;
1316 tlb_flush_page(env
, addr
);
1317 /* FIXME: This flush is needed because of the hack to make memory ops
1318 terminate the TB. It can be removed once the proper IO trap and
1319 re-execute bits are in. */
1324 /* Remove a watchpoint. */
1325 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
)
1329 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1330 if (addr
== env
->watchpoint
[i
].vaddr
) {
1331 env
->nb_watchpoints
--;
1332 env
->watchpoint
[i
] = env
->watchpoint
[env
->nb_watchpoints
];
1333 tlb_flush_page(env
, addr
);
1340 /* Remove all watchpoints. */
1341 void cpu_watchpoint_remove_all(CPUState
*env
) {
1344 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1345 tlb_flush_page(env
, env
->watchpoint
[i
].vaddr
);
1347 env
->nb_watchpoints
= 0;
1350 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1351 breakpoint is reached */
1352 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
)
1354 #if defined(TARGET_HAS_ICE)
1357 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1358 if (env
->breakpoints
[i
] == pc
)
1362 if (env
->nb_breakpoints
>= MAX_BREAKPOINTS
)
1364 env
->breakpoints
[env
->nb_breakpoints
++] = pc
;
1366 breakpoint_invalidate(env
, pc
);
1373 /* remove all breakpoints */
1374 void cpu_breakpoint_remove_all(CPUState
*env
) {
1375 #if defined(TARGET_HAS_ICE)
1377 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1378 breakpoint_invalidate(env
, env
->breakpoints
[i
]);
1380 env
->nb_breakpoints
= 0;
1384 /* remove a breakpoint */
1385 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
)
1387 #if defined(TARGET_HAS_ICE)
1389 for(i
= 0; i
< env
->nb_breakpoints
; i
++) {
1390 if (env
->breakpoints
[i
] == pc
)
1395 env
->nb_breakpoints
--;
1396 if (i
< env
->nb_breakpoints
)
1397 env
->breakpoints
[i
] = env
->breakpoints
[env
->nb_breakpoints
];
1399 breakpoint_invalidate(env
, pc
);
1406 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1407 CPU loop after each instruction */
1408 void cpu_single_step(CPUState
*env
, int enabled
)
1410 #if defined(TARGET_HAS_ICE)
1411 if (env
->singlestep_enabled
!= enabled
) {
1412 env
->singlestep_enabled
= enabled
;
1413 /* must flush all the translated code to avoid inconsistancies */
1414 /* XXX: only flush what is necessary */
1420 /* enable or disable low levels log */
1421 void cpu_set_log(int log_flags
)
1423 loglevel
= log_flags
;
1424 if (loglevel
&& !logfile
) {
1425 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1427 perror(logfilename
);
1430 #if !defined(CONFIG_SOFTMMU)
1431 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1433 static char logfile_buf
[4096];
1434 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1437 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1441 if (!loglevel
&& logfile
) {
1447 void cpu_set_log_filename(const char *filename
)
1449 logfilename
= strdup(filename
);
1454 cpu_set_log(loglevel
);
1457 /* mask must never be zero, except for A20 change call */
1458 void cpu_interrupt(CPUState
*env
, int mask
)
1460 #if !defined(USE_NPTL)
1461 TranslationBlock
*tb
;
1462 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1466 old_mask
= env
->interrupt_request
;
1467 /* FIXME: This is probably not threadsafe. A different thread could
1468 be in the middle of a read-modify-write operation. */
1469 env
->interrupt_request
|= mask
;
1470 #if defined(USE_NPTL)
1471 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1472 problem and hope the cpu will stop of its own accord. For userspace
1473 emulation this often isn't actually as bad as it sounds. Often
1474 signals are used primarily to interrupt blocking syscalls. */
1477 env
->icount_decr
.u16
.high
= 0xffff;
1478 #ifndef CONFIG_USER_ONLY
1479 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1480 an async event happened and we need to process it. */
1482 && (mask
& ~(old_mask
| CPU_INTERRUPT_EXIT
)) != 0) {
1483 cpu_abort(env
, "Raised interrupt while not in I/O function");
1487 tb
= env
->current_tb
;
1488 /* if the cpu is currently executing code, we must unlink it and
1489 all the potentially executing TB */
1490 if (tb
&& !testandset(&interrupt_lock
)) {
1491 env
->current_tb
= NULL
;
1492 tb_reset_jump_recursive(tb
);
1493 resetlock(&interrupt_lock
);
1499 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1501 env
->interrupt_request
&= ~mask
;
1504 const CPULogItem cpu_log_items
[] = {
1505 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1506 "show generated host assembly code for each compiled TB" },
1507 { CPU_LOG_TB_IN_ASM
, "in_asm",
1508 "show target assembly code for each compiled TB" },
1509 { CPU_LOG_TB_OP
, "op",
1510 "show micro ops for each compiled TB" },
1511 { CPU_LOG_TB_OP_OPT
, "op_opt",
1514 "before eflags optimization and "
1516 "after liveness analysis" },
1517 { CPU_LOG_INT
, "int",
1518 "show interrupts/exceptions in short format" },
1519 { CPU_LOG_EXEC
, "exec",
1520 "show trace before each executed TB (lots of logs)" },
1521 { CPU_LOG_TB_CPU
, "cpu",
1522 "show CPU state before block translation" },
1524 { CPU_LOG_PCALL
, "pcall",
1525 "show protected mode far calls/returns/exceptions" },
1528 { CPU_LOG_IOPORT
, "ioport",
1529 "show all i/o ports accesses" },
/* Return 1 if the first n bytes of s1 exactly equal the whole string s2,
   0 otherwise.  Helper for parsing comma-separated log option lists. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1541 /* takes a comma separated list of log masks. Return 0 if error. */
1542 int cpu_str_to_log_mask(const char *str
)
1544 const CPULogItem
*item
;
1551 p1
= strchr(p
, ',');
1554 if(cmp1(p
,p1
-p
,"all")) {
1555 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1559 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1560 if (cmp1(p
, p1
- p
, item
->name
))
1574 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1581 fprintf(stderr
, "qemu: fatal: ");
1582 vfprintf(stderr
, fmt
, ap
);
1583 fprintf(stderr
, "\n");
1585 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1587 cpu_dump_state(env
, stderr
, fprintf
, 0);
1590 fprintf(logfile
, "qemu: fatal: ");
1591 vfprintf(logfile
, fmt
, ap2
);
1592 fprintf(logfile
, "\n");
1594 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1596 cpu_dump_state(env
, logfile
, fprintf
, 0);
1606 CPUState
*cpu_copy(CPUState
*env
)
1608 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1609 /* preserve chaining and index */
1610 CPUState
*next_cpu
= new_env
->next_cpu
;
1611 int cpu_index
= new_env
->cpu_index
;
1612 memcpy(new_env
, env
, sizeof(CPUState
));
1613 new_env
->next_cpu
= next_cpu
;
1614 new_env
->cpu_index
= cpu_index
;
1618 #if !defined(CONFIG_USER_ONLY)
1620 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1624 /* Discard jump cache entries for any tb which might potentially
1625 overlap the flushed page. */
1626 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1627 memset (&env
->tb_jmp_cache
[i
], 0,
1628 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1630 i
= tb_jmp_cache_hash_page(addr
);
1631 memset (&env
->tb_jmp_cache
[i
], 0,
1632 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1635 /* NOTE: if flush_global is true, also flush global entries (not
1637 void tlb_flush(CPUState
*env
, int flush_global
)
1641 #if defined(DEBUG_TLB)
1642 printf("tlb_flush:\n");
1644 /* must reset current TB so that interrupts cannot modify the
1645 links while we are modifying them */
1646 env
->current_tb
= NULL
;
1648 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1649 env
->tlb_table
[0][i
].addr_read
= -1;
1650 env
->tlb_table
[0][i
].addr_write
= -1;
1651 env
->tlb_table
[0][i
].addr_code
= -1;
1652 env
->tlb_table
[1][i
].addr_read
= -1;
1653 env
->tlb_table
[1][i
].addr_write
= -1;
1654 env
->tlb_table
[1][i
].addr_code
= -1;
1655 #if (NB_MMU_MODES >= 3)
1656 env
->tlb_table
[2][i
].addr_read
= -1;
1657 env
->tlb_table
[2][i
].addr_write
= -1;
1658 env
->tlb_table
[2][i
].addr_code
= -1;
1659 #if (NB_MMU_MODES == 4)
1660 env
->tlb_table
[3][i
].addr_read
= -1;
1661 env
->tlb_table
[3][i
].addr_write
= -1;
1662 env
->tlb_table
[3][i
].addr_code
= -1;
1667 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1670 if (env
->kqemu_enabled
) {
1671 kqemu_flush(env
, flush_global
);
1677 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1679 if (addr
== (tlb_entry
->addr_read
&
1680 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1681 addr
== (tlb_entry
->addr_write
&
1682 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1683 addr
== (tlb_entry
->addr_code
&
1684 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1685 tlb_entry
->addr_read
= -1;
1686 tlb_entry
->addr_write
= -1;
1687 tlb_entry
->addr_code
= -1;
1691 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1695 #if defined(DEBUG_TLB)
1696 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1698 /* must reset current TB so that interrupts cannot modify the
1699 links while we are modifying them */
1700 env
->current_tb
= NULL
;
1702 addr
&= TARGET_PAGE_MASK
;
1703 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1704 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1705 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1706 #if (NB_MMU_MODES >= 3)
1707 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1708 #if (NB_MMU_MODES == 4)
1709 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1713 tlb_flush_jmp_cache(env
, addr
);
1716 if (env
->kqemu_enabled
) {
1717 kqemu_flush_page(env
, addr
);
1722 /* update the TLBs so that writes to code in the virtual page 'addr'
1724 static void tlb_protect_code(ram_addr_t ram_addr
)
1726 cpu_physical_memory_reset_dirty(ram_addr
,
1727 ram_addr
+ TARGET_PAGE_SIZE
,
1731 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1732 tested for self modifying code */
1733 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1736 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1739 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1740 unsigned long start
, unsigned long length
)
1743 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1744 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1745 if ((addr
- start
) < length
) {
1746 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1751 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1755 unsigned long length
, start1
;
1759 start
&= TARGET_PAGE_MASK
;
1760 end
= TARGET_PAGE_ALIGN(end
);
1762 length
= end
- start
;
1765 len
= length
>> TARGET_PAGE_BITS
;
1767 /* XXX: should not depend on cpu context */
1769 if (env
->kqemu_enabled
) {
1772 for(i
= 0; i
< len
; i
++) {
1773 kqemu_set_notdirty(env
, addr
);
1774 addr
+= TARGET_PAGE_SIZE
;
1778 mask
= ~dirty_flags
;
1779 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1780 for(i
= 0; i
< len
; i
++)
1783 /* we modify the TLB cache so that the dirty bit will be set again
1784 when accessing the range */
1785 start1
= start
+ (unsigned long)phys_ram_base
;
1786 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1787 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1788 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1789 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1790 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1791 #if (NB_MMU_MODES >= 3)
1792 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1793 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1794 #if (NB_MMU_MODES == 4)
1795 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1796 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1802 int cpu_physical_memory_set_dirty_tracking(int enable
)
1804 in_migration
= enable
;
1808 int cpu_physical_memory_get_dirty_tracking(void)
1810 return in_migration
;
1813 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1815 ram_addr_t ram_addr
;
1817 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1818 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1819 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1820 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1821 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1826 /* update the TLB according to the current state of the dirty bits */
1827 void cpu_tlb_update_dirty(CPUState
*env
)
1830 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1831 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1832 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1833 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1834 #if (NB_MMU_MODES >= 3)
1835 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1836 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1837 #if (NB_MMU_MODES == 4)
1838 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1839 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1844 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1846 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1847 tlb_entry
->addr_write
= vaddr
;
1850 /* update the TLB corresponding to virtual page vaddr
1851 so that it is no longer dirty */
1852 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1856 vaddr
&= TARGET_PAGE_MASK
;
1857 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1858 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1859 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1860 #if (NB_MMU_MODES >= 3)
1861 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1862 #if (NB_MMU_MODES == 4)
1863 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1868 /* add a new TLB entry. At most one entry for a given virtual address
1869 is permitted. Return 0 if OK or 2 if the page could not be mapped
1870 (can only happen in non SOFTMMU mode for I/O pages or pages
1871 conflicting with the host address space). */
1872 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1873 target_phys_addr_t paddr
, int prot
,
1874 int mmu_idx
, int is_softmmu
)
1879 target_ulong address
;
1880 target_ulong code_address
;
1881 target_phys_addr_t addend
;
1885 target_phys_addr_t iotlb
;
1887 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1889 pd
= IO_MEM_UNASSIGNED
;
1891 pd
= p
->phys_offset
;
1893 #if defined(DEBUG_TLB)
1894 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1895 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1900 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1901 /* IO memory case (romd handled later) */
1902 address
|= TLB_MMIO
;
1904 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1905 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1907 iotlb
= pd
& TARGET_PAGE_MASK
;
1908 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1909 iotlb
|= IO_MEM_NOTDIRTY
;
1911 iotlb
|= IO_MEM_ROM
;
1913 /* IO handlers are currently passed a phsical address.
1914 It would be nice to pass an offset from the base address
1915 of that region. This would avoid having to special case RAM,
1916 and avoid full address decoding in every device.
1917 We can't use the high bits of pd for this because
1918 IO_MEM_ROMD uses these as a ram address. */
1919 iotlb
= (pd
& ~TARGET_PAGE_MASK
) + paddr
;
1922 code_address
= address
;
1923 /* Make accesses to pages with watchpoints go via the
1924 watchpoint trap routines. */
1925 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
1926 if (vaddr
== (env
->watchpoint
[i
].vaddr
& TARGET_PAGE_MASK
)) {
1927 iotlb
= io_mem_watch
+ paddr
;
1928 /* TODO: The memory case can be optimized by not trapping
1929 reads of pages with a write breakpoint. */
1930 address
|= TLB_MMIO
;
1934 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1935 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
1936 te
= &env
->tlb_table
[mmu_idx
][index
];
1937 te
->addend
= addend
- vaddr
;
1938 if (prot
& PAGE_READ
) {
1939 te
->addr_read
= address
;
1944 if (prot
& PAGE_EXEC
) {
1945 te
->addr_code
= code_address
;
1949 if (prot
& PAGE_WRITE
) {
1950 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
1951 (pd
& IO_MEM_ROMD
)) {
1952 /* Write access calls the I/O callback. */
1953 te
->addr_write
= address
| TLB_MMIO
;
1954 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
1955 !cpu_physical_memory_is_dirty(pd
)) {
1956 te
->addr_write
= address
| TLB_NOTDIRTY
;
1958 te
->addr_write
= address
;
1961 te
->addr_write
= -1;
1968 void tlb_flush(CPUState
*env
, int flush_global
)
1972 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1976 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1977 target_phys_addr_t paddr
, int prot
,
1978 int mmu_idx
, int is_softmmu
)
1983 /* dump memory mappings */
1984 void page_dump(FILE *f
)
1986 unsigned long start
, end
;
1987 int i
, j
, prot
, prot1
;
1990 fprintf(f
, "%-8s %-8s %-8s %s\n",
1991 "start", "end", "size", "prot");
1995 for(i
= 0; i
<= L1_SIZE
; i
++) {
2000 for(j
= 0;j
< L2_SIZE
; j
++) {
2005 if (prot1
!= prot
) {
2006 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2008 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2009 start
, end
, end
- start
,
2010 prot
& PAGE_READ
? 'r' : '-',
2011 prot
& PAGE_WRITE
? 'w' : '-',
2012 prot
& PAGE_EXEC
? 'x' : '-');
2026 int page_get_flags(target_ulong address
)
2030 p
= page_find(address
>> TARGET_PAGE_BITS
);
2036 /* modify the flags of a page and invalidate the code if
2037 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2038 depending on PAGE_WRITE */
2039 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2044 /* mmap_lock should already be held. */
2045 start
= start
& TARGET_PAGE_MASK
;
2046 end
= TARGET_PAGE_ALIGN(end
);
2047 if (flags
& PAGE_WRITE
)
2048 flags
|= PAGE_WRITE_ORG
;
2049 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2050 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2051 /* We may be called for host regions that are outside guest
2055 /* if the write protection is set, then we invalidate the code
2057 if (!(p
->flags
& PAGE_WRITE
) &&
2058 (flags
& PAGE_WRITE
) &&
2060 tb_invalidate_phys_page(addr
, 0, NULL
);
2066 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2072 if (start
+ len
< start
)
2073 /* we've wrapped around */
2076 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2077 start
= start
& TARGET_PAGE_MASK
;
2079 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2080 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2083 if( !(p
->flags
& PAGE_VALID
) )
2086 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2088 if (flags
& PAGE_WRITE
) {
2089 if (!(p
->flags
& PAGE_WRITE_ORG
))
2091 /* unprotect the page if it was put read-only because it
2092 contains translated code */
2093 if (!(p
->flags
& PAGE_WRITE
)) {
2094 if (!page_unprotect(addr
, 0, NULL
))
2103 /* called from signal handler: invalidate the code and unprotect the
2104 page. Return TRUE if the fault was succesfully handled. */
2105 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2107 unsigned int page_index
, prot
, pindex
;
2109 target_ulong host_start
, host_end
, addr
;
2111 /* Technically this isn't safe inside a signal handler. However we
2112 know this only ever happens in a synchronous SEGV handler, so in
2113 practice it seems to be ok. */
2116 host_start
= address
& qemu_host_page_mask
;
2117 page_index
= host_start
>> TARGET_PAGE_BITS
;
2118 p1
= page_find(page_index
);
2123 host_end
= host_start
+ qemu_host_page_size
;
2126 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2130 /* if the page was really writable, then we change its
2131 protection back to writable */
2132 if (prot
& PAGE_WRITE_ORG
) {
2133 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2134 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2135 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2136 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2137 p1
[pindex
].flags
|= PAGE_WRITE
;
2138 /* and since the content will be modified, we must invalidate
2139 the corresponding translated code. */
2140 tb_invalidate_phys_page(address
, pc
, puc
);
2141 #ifdef DEBUG_TB_CHECK
2142 tb_invalidate_check(address
);
2152 static inline void tlb_set_dirty(CPUState
*env
,
2153 unsigned long addr
, target_ulong vaddr
)
2156 #endif /* defined(CONFIG_USER_ONLY) */
2158 #if !defined(CONFIG_USER_ONLY)
2159 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2161 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2162 ram_addr_t orig_memory
);
2163 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2166 if (addr > start_addr) \
2169 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2170 if (start_addr2 > 0) \
2174 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2175 end_addr2 = TARGET_PAGE_SIZE - 1; \
2177 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2178 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2183 /* register physical memory. 'size' must be a multiple of the target
2184 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2186 void cpu_register_physical_memory(target_phys_addr_t start_addr
,
2188 ram_addr_t phys_offset
)
2190 target_phys_addr_t addr
, end_addr
;
2193 ram_addr_t orig_size
= size
;
2197 /* XXX: should not depend on cpu context */
2199 if (env
->kqemu_enabled
) {
2200 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2204 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2206 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2207 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2208 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2209 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2210 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2211 ram_addr_t orig_memory
= p
->phys_offset
;
2212 target_phys_addr_t start_addr2
, end_addr2
;
2213 int need_subpage
= 0;
2215 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2217 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2218 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2219 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2220 &p
->phys_offset
, orig_memory
);
2222 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2225 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
);
2227 p
->phys_offset
= phys_offset
;
2228 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2229 (phys_offset
& IO_MEM_ROMD
))
2230 phys_offset
+= TARGET_PAGE_SIZE
;
2233 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2234 p
->phys_offset
= phys_offset
;
2235 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2236 (phys_offset
& IO_MEM_ROMD
))
2237 phys_offset
+= TARGET_PAGE_SIZE
;
2239 target_phys_addr_t start_addr2
, end_addr2
;
2240 int need_subpage
= 0;
2242 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2243 end_addr2
, need_subpage
);
2245 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2246 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2247 &p
->phys_offset
, IO_MEM_UNASSIGNED
);
2248 subpage_register(subpage
, start_addr2
, end_addr2
,
2255 /* since each CPU stores ram addresses in its TLB cache, we must
2256 reset the modified entries */
2258 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2263 /* XXX: temporary until new memory mapping API */
2264 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2268 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2270 return IO_MEM_UNASSIGNED
;
2271 return p
->phys_offset
;
2274 /* XXX: better than nothing */
2275 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2278 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2279 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2280 (uint64_t)size
, (uint64_t)phys_ram_size
);
2283 addr
= phys_ram_alloc_offset
;
2284 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2288 void qemu_ram_free(ram_addr_t addr
)
2292 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2294 #ifdef DEBUG_UNASSIGNED
2295 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2297 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2298 do_unassigned_access(addr
, 0, 0, 0, 1);
2303 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2305 #ifdef DEBUG_UNASSIGNED
2306 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2308 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2309 do_unassigned_access(addr
, 0, 0, 0, 2);
2314 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2316 #ifdef DEBUG_UNASSIGNED
2317 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2319 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2320 do_unassigned_access(addr
, 0, 0, 0, 4);
2325 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2327 #ifdef DEBUG_UNASSIGNED
2328 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2330 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2331 do_unassigned_access(addr
, 1, 0, 0, 1);
2335 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2337 #ifdef DEBUG_UNASSIGNED
2338 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2340 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2341 do_unassigned_access(addr
, 1, 0, 0, 2);
2345 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2347 #ifdef DEBUG_UNASSIGNED
2348 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2350 #if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351 do_unassigned_access(addr
, 1, 0, 0, 4);
2355 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2356 unassigned_mem_readb
,
2357 unassigned_mem_readw
,
2358 unassigned_mem_readl
,
2361 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2362 unassigned_mem_writeb
,
2363 unassigned_mem_writew
,
2364 unassigned_mem_writel
,
2367 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2371 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2372 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2373 #if !defined(CONFIG_USER_ONLY)
2374 tb_invalidate_phys_page_fast(ram_addr
, 1);
2375 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2378 stb_p(phys_ram_base
+ ram_addr
, val
);
2380 if (cpu_single_env
->kqemu_enabled
&&
2381 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2382 kqemu_modify_page(cpu_single_env
, ram_addr
);
2384 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2385 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2386 /* we remove the notdirty callback only if the code has been
2388 if (dirty_flags
== 0xff)
2389 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2392 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2396 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2397 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2398 #if !defined(CONFIG_USER_ONLY)
2399 tb_invalidate_phys_page_fast(ram_addr
, 2);
2400 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2403 stw_p(phys_ram_base
+ ram_addr
, val
);
2405 if (cpu_single_env
->kqemu_enabled
&&
2406 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2407 kqemu_modify_page(cpu_single_env
, ram_addr
);
2409 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2410 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2411 /* we remove the notdirty callback only if the code has been
2413 if (dirty_flags
== 0xff)
2414 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2417 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2421 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2422 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2423 #if !defined(CONFIG_USER_ONLY)
2424 tb_invalidate_phys_page_fast(ram_addr
, 4);
2425 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2428 stl_p(phys_ram_base
+ ram_addr
, val
);
2430 if (cpu_single_env
->kqemu_enabled
&&
2431 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2432 kqemu_modify_page(cpu_single_env
, ram_addr
);
2434 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2435 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2436 /* we remove the notdirty callback only if the code has been
2438 if (dirty_flags
== 0xff)
2439 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2442 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2443 NULL
, /* never used */
2444 NULL
, /* never used */
2445 NULL
, /* never used */
2448 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2449 notdirty_mem_writeb
,
2450 notdirty_mem_writew
,
2451 notdirty_mem_writel
,
2454 /* Generate a debug exception if a watchpoint has been hit. */
2455 static void check_watchpoint(int offset
, int flags
)
2457 CPUState
*env
= cpu_single_env
;
2461 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2462 for (i
= 0; i
< env
->nb_watchpoints
; i
++) {
2463 if (vaddr
== env
->watchpoint
[i
].vaddr
2464 && (env
->watchpoint
[i
].type
& flags
)) {
2465 env
->watchpoint_hit
= i
+ 1;
2466 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2472 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2473 so these check for a hit then pass through to the normal out-of-line
2475 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2477 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2478 return ldub_phys(addr
);
2481 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2483 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2484 return lduw_phys(addr
);
2487 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2489 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_READ
);
2490 return ldl_phys(addr
);
2493 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2496 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2497 stb_phys(addr
, val
);
2500 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2503 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2504 stw_phys(addr
, val
);
2507 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2510 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, PAGE_WRITE
);
2511 stl_phys(addr
, val
);
2514 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2520 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2526 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2532 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2533 #if defined(DEBUG_SUBPAGE)
2534 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2535 mmio
, len
, addr
, idx
);
2537 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
], addr
);
2542 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2543 uint32_t value
, unsigned int len
)
2547 idx
= SUBPAGE_IDX(addr
- mmio
->base
);
2548 #if defined(DEBUG_SUBPAGE)
2549 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2550 mmio
, len
, addr
, idx
, value
);
2552 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
], addr
, value
);
2555 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2557 #if defined(DEBUG_SUBPAGE)
2558 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2561 return subpage_readlen(opaque
, addr
, 0);
2564 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2567 #if defined(DEBUG_SUBPAGE)
2568 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2570 subpage_writelen(opaque
, addr
, value
, 0);
2573 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2575 #if defined(DEBUG_SUBPAGE)
2576 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2579 return subpage_readlen(opaque
, addr
, 1);
2582 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2585 #if defined(DEBUG_SUBPAGE)
2586 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2588 subpage_writelen(opaque
, addr
, value
, 1);
2591 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2593 #if defined(DEBUG_SUBPAGE)
2594 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2597 return subpage_readlen(opaque
, addr
, 2);
2600 static void subpage_writel (void *opaque
,
2601 target_phys_addr_t addr
, uint32_t value
)
2603 #if defined(DEBUG_SUBPAGE)
2604 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2606 subpage_writelen(opaque
, addr
, value
, 2);
2609 static CPUReadMemoryFunc
*subpage_read
[] = {
2615 static CPUWriteMemoryFunc
*subpage_write
[] = {
2621 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2627 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2629 idx
= SUBPAGE_IDX(start
);
2630 eidx
= SUBPAGE_IDX(end
);
2631 #if defined(DEBUG_SUBPAGE)
2632 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2633 mmio
, start
, end
, idx
, eidx
, memory
);
2635 memory
>>= IO_MEM_SHIFT
;
2636 for (; idx
<= eidx
; idx
++) {
2637 for (i
= 0; i
< 4; i
++) {
2638 if (io_mem_read
[memory
][i
]) {
2639 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2640 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2642 if (io_mem_write
[memory
][i
]) {
2643 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2644 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2652 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2653 ram_addr_t orig_memory
)
2658 mmio
= qemu_mallocz(sizeof(subpage_t
));
2661 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2662 #if defined(DEBUG_SUBPAGE)
2663 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2664 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2666 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2667 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
);
2673 static void io_mem_init(void)
2675 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2676 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2677 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2680 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2681 watch_mem_write
, NULL
);
2682 /* alloc dirty bits array */
2683 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2684 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2687 /* mem_read and mem_write are arrays of functions containing the
2688 function to access byte (index 0), word (index 1) and dword (index
2689 2). Functions can be omitted with a NULL function pointer. The
2690 registered functions may be modified dynamically later.
2691 If io_index is non zero, the corresponding io zone is
2692 modified. If it is zero, a new io zone is allocated. The return
2693 value can be used with cpu_register_physical_memory(). (-1) is
2694 returned if error. */
2695 int cpu_register_io_memory(int io_index
,
2696 CPUReadMemoryFunc
**mem_read
,
2697 CPUWriteMemoryFunc
**mem_write
,
2700 int i
, subwidth
= 0;
2702 if (io_index
<= 0) {
2703 if (io_mem_nb
>= IO_MEM_NB_ENTRIES
)
2705 io_index
= io_mem_nb
++;
2707 if (io_index
>= IO_MEM_NB_ENTRIES
)
2711 for(i
= 0;i
< 3; i
++) {
2712 if (!mem_read
[i
] || !mem_write
[i
])
2713 subwidth
= IO_MEM_SUBWIDTH
;
2714 io_mem_read
[io_index
][i
] = mem_read
[i
];
2715 io_mem_write
[io_index
][i
] = mem_write
[i
];
2717 io_mem_opaque
[io_index
] = opaque
;
2718 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2721 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2723 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2726 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2728 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2731 #endif /* !defined(CONFIG_USER_ONLY) */
2733 /* physical memory access (slow version, mainly for debug) */
2734 #if defined(CONFIG_USER_ONLY)
2735 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2736 int len
, int is_write
)
2743 page
= addr
& TARGET_PAGE_MASK
;
2744 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2747 flags
= page_get_flags(page
);
2748 if (!(flags
& PAGE_VALID
))
2751 if (!(flags
& PAGE_WRITE
))
2753 /* XXX: this code should not depend on lock_user */
2754 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2755 /* FIXME - should this return an error rather than just fail? */
2758 unlock_user(p
, addr
, l
);
2760 if (!(flags
& PAGE_READ
))
2762 /* XXX: this code should not depend on lock_user */
2763 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2764 /* FIXME - should this return an error rather than just fail? */
2767 unlock_user(p
, addr
, 0);
2776 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2777 int len
, int is_write
)
2782 target_phys_addr_t page
;
2787 page
= addr
& TARGET_PAGE_MASK
;
2788 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2791 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2793 pd
= IO_MEM_UNASSIGNED
;
2795 pd
= p
->phys_offset
;
2799 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2800 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2801 /* XXX: could force cpu_single_env to NULL to avoid
2803 if (l
>= 4 && ((addr
& 3) == 0)) {
2804 /* 32 bit write access */
2806 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
2808 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2809 /* 16 bit write access */
2811 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
2814 /* 8 bit write access */
2816 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr
, val
);
2820 unsigned long addr1
;
2821 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2823 ptr
= phys_ram_base
+ addr1
;
2824 memcpy(ptr
, buf
, l
);
2825 if (!cpu_physical_memory_is_dirty(addr1
)) {
2826 /* invalidate code */
2827 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2829 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2830 (0xff & ~CODE_DIRTY_FLAG
);
2834 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2835 !(pd
& IO_MEM_ROMD
)) {
2837 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2838 if (l
>= 4 && ((addr
& 3) == 0)) {
2839 /* 32 bit read access */
2840 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2843 } else if (l
>= 2 && ((addr
& 1) == 0)) {
2844 /* 16 bit read access */
2845 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
2849 /* 8 bit read access */
2850 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr
);
2856 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2857 (addr
& ~TARGET_PAGE_MASK
);
2858 memcpy(buf
, ptr
, l
);
2867 /* used for ROM loading : can write in RAM and ROM */
2868 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
2869 const uint8_t *buf
, int len
)
2873 target_phys_addr_t page
;
2878 page
= addr
& TARGET_PAGE_MASK
;
2879 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2882 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2884 pd
= IO_MEM_UNASSIGNED
;
2886 pd
= p
->phys_offset
;
2889 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
2890 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
2891 !(pd
& IO_MEM_ROMD
)) {
2894 unsigned long addr1
;
2895 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2897 ptr
= phys_ram_base
+ addr1
;
2898 memcpy(ptr
, buf
, l
);
2907 /* warning: addr must be aligned */
2908 uint32_t ldl_phys(target_phys_addr_t addr
)
2916 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2918 pd
= IO_MEM_UNASSIGNED
;
2920 pd
= p
->phys_offset
;
2923 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2924 !(pd
& IO_MEM_ROMD
)) {
2926 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2927 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2930 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2931 (addr
& ~TARGET_PAGE_MASK
);
2937 /* warning: addr must be aligned */
2938 uint64_t ldq_phys(target_phys_addr_t addr
)
2946 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2948 pd
= IO_MEM_UNASSIGNED
;
2950 pd
= p
->phys_offset
;
2953 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
2954 !(pd
& IO_MEM_ROMD
)) {
2956 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2957 #ifdef TARGET_WORDS_BIGENDIAN
2958 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
2959 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
2961 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
2962 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
2966 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
2967 (addr
& ~TARGET_PAGE_MASK
);
2974 uint32_t ldub_phys(target_phys_addr_t addr
)
2977 cpu_physical_memory_read(addr
, &val
, 1);
2982 uint32_t lduw_phys(target_phys_addr_t addr
)
2985 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
2986 return tswap16(val
);
2989 /* warning: addr must be aligned. The ram page is not masked as dirty
2990 and the code inside is not invalidated. It is useful if the dirty
2991 bits are used to track modified PTEs */
2992 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
2999 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3001 pd
= IO_MEM_UNASSIGNED
;
3003 pd
= p
->phys_offset
;
3006 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3007 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3008 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3010 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3011 ptr
= phys_ram_base
+ addr1
;
3014 if (unlikely(in_migration
)) {
3015 if (!cpu_physical_memory_is_dirty(addr1
)) {
3016 /* invalidate code */
3017 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3019 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3020 (0xff & ~CODE_DIRTY_FLAG
);
3026 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3033 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3035 pd
= IO_MEM_UNASSIGNED
;
3037 pd
= p
->phys_offset
;
3040 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3041 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3042 #ifdef TARGET_WORDS_BIGENDIAN
3043 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3044 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3046 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3047 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3050 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3051 (addr
& ~TARGET_PAGE_MASK
);
3056 /* warning: addr must be aligned */
3057 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3064 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3066 pd
= IO_MEM_UNASSIGNED
;
3068 pd
= p
->phys_offset
;
3071 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3072 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3073 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3075 unsigned long addr1
;
3076 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3078 ptr
= phys_ram_base
+ addr1
;
3080 if (!cpu_physical_memory_is_dirty(addr1
)) {
3081 /* invalidate code */
3082 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3084 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3085 (0xff & ~CODE_DIRTY_FLAG
);
3091 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3094 cpu_physical_memory_write(addr
, &v
, 1);
3098 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3100 uint16_t v
= tswap16(val
);
3101 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3105 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3108 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3113 /* virtual memory access for debug */
3114 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3115 uint8_t *buf
, int len
, int is_write
)
3118 target_phys_addr_t phys_addr
;
3122 page
= addr
& TARGET_PAGE_MASK
;
3123 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3124 /* if no physical page mapped, return an error */
3125 if (phys_addr
== -1)
3127 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3130 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3139 /* in deterministic execution mode, instructions doing device I/Os
3140 must be at the end of the TB */
3141 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3143 TranslationBlock
*tb
;
3145 target_ulong pc
, cs_base
;
3148 tb
= tb_find_pc((unsigned long)retaddr
);
3150 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3153 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3154 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3155 /* Calculate how many instructions had been executed before the fault
3157 n
= n
- env
->icount_decr
.u16
.low
;
3158 /* Generate a new TB ending on the I/O insn. */
3160 /* On MIPS and SH, delay slot instructions can only be restarted if
3161 they were already the first instruction in the TB. If this is not
3162 the first instruction in a TB then re-execute the preceding
3164 #if defined(TARGET_MIPS)
3165 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3166 env
->active_tc
.PC
-= 4;
3167 env
->icount_decr
.u16
.low
++;
3168 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3170 #elif defined(TARGET_SH4)
3171 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3174 env
->icount_decr
.u16
.low
++;
3175 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3178 /* This should never happen. */
3179 if (n
> CF_COUNT_MASK
)
3180 cpu_abort(env
, "TB too big during recompile");
3182 cflags
= n
| CF_LAST_IO
;
3184 cs_base
= tb
->cs_base
;
3186 tb_phys_invalidate(tb
, -1);
3187 /* FIXME: In theory this could raise an exception. In practice
3188 we have already translated the block once so it's probably ok. */
3189 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3190 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3191 the first in the TB) then we end up generating a whole new TB and
3192 repeating the fault, which is horribly inefficient.
3193 Better would be to execute just this insn uncached, or generate a
3195 cpu_resume_from_signal(env
, NULL
);
3198 void dump_exec_info(FILE *f
,
3199 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3201 int i
, target_code_size
, max_target_code_size
;
3202 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3203 TranslationBlock
*tb
;
3205 target_code_size
= 0;
3206 max_target_code_size
= 0;
3208 direct_jmp_count
= 0;
3209 direct_jmp2_count
= 0;
3210 for(i
= 0; i
< nb_tbs
; i
++) {
3212 target_code_size
+= tb
->size
;
3213 if (tb
->size
> max_target_code_size
)
3214 max_target_code_size
= tb
->size
;
3215 if (tb
->page_addr
[1] != -1)
3217 if (tb
->tb_next_offset
[0] != 0xffff) {
3219 if (tb
->tb_next_offset
[1] != 0xffff) {
3220 direct_jmp2_count
++;
3224 /* XXX: avoid using doubles ? */
3225 cpu_fprintf(f
, "Translation buffer state:\n");
3226 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3227 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3228 cpu_fprintf(f
, "TB count %d/%d\n",
3229 nb_tbs
, code_gen_max_blocks
);
3230 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3231 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3232 max_target_code_size
);
3233 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3234 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3235 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3236 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3238 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3239 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3241 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3243 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3244 cpu_fprintf(f
, "\nStatistics:\n");
3245 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3246 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3247 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3248 tcg_dump_info(f
, cpu_fprintf
);
3251 #if !defined(CONFIG_USER_ONLY)
3253 #define MMUSUFFIX _cmmu
3254 #define GETPC() NULL
3255 #define env cpu_single_env
3256 #define SOFTMMU_CODE_ACCESS
3259 #include "softmmu_template.h"
3262 #include "softmmu_template.h"
3265 #include "softmmu_template.h"
3268 #include "softmmu_template.h"