2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
36 #include "qemu-common.h"
41 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 #if defined(TARGET_SPARC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 41
67 #elif defined(TARGET_SPARC)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 36
69 #elif defined(TARGET_ALPHA)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 42
71 #define TARGET_VIRT_ADDR_SPACE_BITS 42
72 #elif defined(TARGET_PPC64)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_X86_64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_I386)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 static TranslationBlock
*tbs
;
83 int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue
[1024] code_gen_section
;
106 static uint8_t *code_gen_buffer
;
107 static unsigned long code_gen_buffer_size
;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size
;
110 uint8_t *code_gen_ptr
;
112 #if !defined(CONFIG_USER_ONLY)
114 uint8_t *phys_ram_dirty
;
115 static int in_migration
;
117 typedef struct RAMBlock
{
121 struct RAMBlock
*next
;
124 static RAMBlock
*ram_blocks
;
125 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126 then we can no longer assume contiguous ram offsets, and external uses
127 of this variable will break. */
128 ram_addr_t last_ram_offset
;
132 /* current CPU in the current thread. It is only valid inside
134 CPUState
*cpu_single_env
;
135 /* 0 = Do not count executed instructions.
136 1 = Precise instruction counting.
137 2 = Adaptive rate instruction counting. */
139 /* Current instruction counter. While executing translated code this may
140 include some instructions that have not yet been executed. */
143 typedef struct PageDesc
{
144 /* list of TBs intersecting this ram page */
145 TranslationBlock
*first_tb
;
146 /* in order to optimize self modifying code, we count the number
147 of lookups we do to a given page to use a bitmap */
148 unsigned int code_write_count
;
149 uint8_t *code_bitmap
;
150 #if defined(CONFIG_USER_ONLY)
155 typedef struct PhysPageDesc
{
156 /* offset in host memory of the page + io_index in the low bits */
157 ram_addr_t phys_offset
;
158 ram_addr_t region_offset
;
162 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163 /* XXX: this is a temporary hack for alpha target.
164 * In the future, this is to be replaced by a multi-level table
165 * to actually be able to handle the complete 64 bits address space.
167 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
169 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
172 #define L1_SIZE (1 << L1_BITS)
173 #define L2_SIZE (1 << L2_BITS)
175 unsigned long qemu_real_host_page_size
;
176 unsigned long qemu_host_page_bits
;
177 unsigned long qemu_host_page_size
;
178 unsigned long qemu_host_page_mask
;
180 /* XXX: for system emulation, it could just be an array */
181 static PageDesc
*l1_map
[L1_SIZE
];
182 static PhysPageDesc
**l1_phys_map
;
184 #if !defined(CONFIG_USER_ONLY)
185 static void io_mem_init(void);
187 /* io memory support */
188 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
189 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
190 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
191 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
192 static int io_mem_watch
;
197 static const char *logfilename
= "qemu.log";
199 static const char *logfilename
= "/tmp/qemu.log";
203 static int log_append
= 0;
206 static int tlb_flush_count
;
207 static int tb_flush_count
;
208 static int tb_phys_invalidate_count
;
210 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
211 typedef struct subpage_t
{
212 target_phys_addr_t base
;
213 CPUReadMemoryFunc
* const *mem_read
[TARGET_PAGE_SIZE
][4];
214 CPUWriteMemoryFunc
* const *mem_write
[TARGET_PAGE_SIZE
][4];
215 void *opaque
[TARGET_PAGE_SIZE
][2][4];
216 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
220 static void map_exec(void *addr
, long size
)
223 VirtualProtect(addr
, size
,
224 PAGE_EXECUTE_READWRITE
, &old_protect
);
228 static void map_exec(void *addr
, long size
)
230 unsigned long start
, end
, page_size
;
232 page_size
= getpagesize();
233 start
= (unsigned long)addr
;
234 start
&= ~(page_size
- 1);
236 end
= (unsigned long)addr
+ size
;
237 end
+= page_size
- 1;
238 end
&= ~(page_size
- 1);
240 mprotect((void *)start
, end
- start
,
241 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
245 static void page_init(void)
247 /* NOTE: we can always suppose that qemu_host_page_size >=
251 SYSTEM_INFO system_info
;
253 GetSystemInfo(&system_info
);
254 qemu_real_host_page_size
= system_info
.dwPageSize
;
257 qemu_real_host_page_size
= getpagesize();
259 if (qemu_host_page_size
== 0)
260 qemu_host_page_size
= qemu_real_host_page_size
;
261 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
262 qemu_host_page_size
= TARGET_PAGE_SIZE
;
263 qemu_host_page_bits
= 0;
264 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
265 qemu_host_page_bits
++;
266 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
267 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
268 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
270 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
272 long long startaddr
, endaddr
;
277 last_brk
= (unsigned long)sbrk(0);
278 f
= fopen("/proc/self/maps", "r");
281 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
283 startaddr
= MIN(startaddr
,
284 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
285 endaddr
= MIN(endaddr
,
286 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
287 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
288 TARGET_PAGE_ALIGN(endaddr
),
299 static inline PageDesc
**page_l1_map(target_ulong index
)
301 #if TARGET_LONG_BITS > 32
302 /* Host memory outside guest VM. For 32-bit targets we have already
303 excluded high addresses. */
304 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
307 return &l1_map
[index
>> L2_BITS
];
310 static inline PageDesc
*page_find_alloc(target_ulong index
)
313 lp
= page_l1_map(index
);
319 /* allocate if not found */
320 #if defined(CONFIG_USER_ONLY)
321 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
322 /* Don't use qemu_malloc because it may recurse. */
323 p
= mmap(NULL
, len
, PROT_READ
| PROT_WRITE
,
324 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
327 unsigned long addr
= h2g(p
);
328 page_set_flags(addr
& TARGET_PAGE_MASK
,
329 TARGET_PAGE_ALIGN(addr
+ len
),
333 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
337 return p
+ (index
& (L2_SIZE
- 1));
340 static inline PageDesc
*page_find(target_ulong index
)
343 lp
= page_l1_map(index
);
351 return p
+ (index
& (L2_SIZE
- 1));
354 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
359 p
= (void **)l1_phys_map
;
360 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
362 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
365 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
368 /* allocate if not found */
371 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
372 memset(p
, 0, sizeof(void *) * L1_SIZE
);
376 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
380 /* allocate if not found */
383 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
385 for (i
= 0; i
< L2_SIZE
; i
++) {
386 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
387 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
390 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
393 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
395 return phys_page_find_alloc(index
, 0);
398 #if !defined(CONFIG_USER_ONLY)
399 static void tlb_protect_code(ram_addr_t ram_addr
);
400 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
402 #define mmap_lock() do { } while(0)
403 #define mmap_unlock() do { } while(0)
406 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
408 #if defined(CONFIG_USER_ONLY)
409 /* Currently it is not recommended to allocate big chunks of data in
410 user mode. It will change when a dedicated libc will be used */
411 #define USE_STATIC_CODE_GEN_BUFFER
414 #ifdef USE_STATIC_CODE_GEN_BUFFER
415 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
418 static void code_gen_alloc(unsigned long tb_size
)
420 #ifdef USE_STATIC_CODE_GEN_BUFFER
421 code_gen_buffer
= static_code_gen_buffer
;
422 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
423 map_exec(code_gen_buffer
, code_gen_buffer_size
);
425 code_gen_buffer_size
= tb_size
;
426 if (code_gen_buffer_size
== 0) {
427 #if defined(CONFIG_USER_ONLY)
428 /* in user mode, phys_ram_size is not meaningful */
429 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
431 /* XXX: needs adjustments */
432 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
435 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
436 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
437 /* The code gen buffer location may have constraints depending on
438 the host cpu and OS */
439 #if defined(__linux__)
444 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
445 #if defined(__x86_64__)
447 /* Cannot map more than that */
448 if (code_gen_buffer_size
> (800 * 1024 * 1024))
449 code_gen_buffer_size
= (800 * 1024 * 1024);
450 #elif defined(__sparc_v9__)
451 // Map the buffer below 2G, so we can use direct calls and branches
453 start
= (void *) 0x60000000UL
;
454 if (code_gen_buffer_size
> (512 * 1024 * 1024))
455 code_gen_buffer_size
= (512 * 1024 * 1024);
456 #elif defined(__arm__)
457 /* Map the buffer below 32M, so we can use direct calls and branches */
459 start
= (void *) 0x01000000UL
;
460 if (code_gen_buffer_size
> 16 * 1024 * 1024)
461 code_gen_buffer_size
= 16 * 1024 * 1024;
463 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
464 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
466 if (code_gen_buffer
== MAP_FAILED
) {
467 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
471 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
475 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
476 #if defined(__x86_64__)
477 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
478 * 0x40000000 is free */
480 addr
= (void *)0x40000000;
481 /* Cannot map more than that */
482 if (code_gen_buffer_size
> (800 * 1024 * 1024))
483 code_gen_buffer_size
= (800 * 1024 * 1024);
485 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
486 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
488 if (code_gen_buffer
== MAP_FAILED
) {
489 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
494 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
495 map_exec(code_gen_buffer
, code_gen_buffer_size
);
497 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
498 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
499 code_gen_buffer_max_size
= code_gen_buffer_size
-
500 code_gen_max_block_size();
501 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
502 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
505 /* Must be called before using the QEMU cpus. 'tb_size' is the size
506 (in bytes) allocated to the translation buffer. Zero means default
508 void cpu_exec_init_all(unsigned long tb_size
)
511 code_gen_alloc(tb_size
);
512 code_gen_ptr
= code_gen_buffer
;
514 #if !defined(CONFIG_USER_ONLY)
519 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
521 static void cpu_common_pre_save(void *opaque
)
523 CPUState
*env
= opaque
;
525 cpu_synchronize_state(env
);
528 static int cpu_common_pre_load(void *opaque
)
530 CPUState
*env
= opaque
;
532 cpu_synchronize_state(env
);
536 static int cpu_common_post_load(void *opaque
, int version_id
)
538 CPUState
*env
= opaque
;
540 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
541 version_id is increased. */
542 env
->interrupt_request
&= ~0x01;
548 static const VMStateDescription vmstate_cpu_common
= {
549 .name
= "cpu_common",
551 .minimum_version_id
= 1,
552 .minimum_version_id_old
= 1,
553 .pre_save
= cpu_common_pre_save
,
554 .pre_load
= cpu_common_pre_load
,
555 .post_load
= cpu_common_post_load
,
556 .fields
= (VMStateField
[]) {
557 VMSTATE_UINT32(halted
, CPUState
),
558 VMSTATE_UINT32(interrupt_request
, CPUState
),
559 VMSTATE_END_OF_LIST()
564 CPUState
*qemu_get_cpu(int cpu
)
566 CPUState
*env
= first_cpu
;
569 if (env
->cpu_index
== cpu
)
577 void cpu_exec_init(CPUState
*env
)
582 #if defined(CONFIG_USER_ONLY)
585 env
->next_cpu
= NULL
;
588 while (*penv
!= NULL
) {
589 penv
= &(*penv
)->next_cpu
;
592 env
->cpu_index
= cpu_index
;
594 QTAILQ_INIT(&env
->breakpoints
);
595 QTAILQ_INIT(&env
->watchpoints
);
597 #if defined(CONFIG_USER_ONLY)
600 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601 vmstate_register(cpu_index
, &vmstate_cpu_common
, env
);
602 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
603 cpu_save
, cpu_load
, env
);
607 static inline void invalidate_page_bitmap(PageDesc
*p
)
609 if (p
->code_bitmap
) {
610 qemu_free(p
->code_bitmap
);
611 p
->code_bitmap
= NULL
;
613 p
->code_write_count
= 0;
616 /* set to NULL all the 'first_tb' fields in all PageDescs */
617 static void page_flush_tb(void)
622 for(i
= 0; i
< L1_SIZE
; i
++) {
625 for(j
= 0; j
< L2_SIZE
; j
++) {
627 invalidate_page_bitmap(p
);
634 /* flush all the translation blocks */
635 /* XXX: tb_flush is currently not thread safe */
636 void tb_flush(CPUState
*env1
)
639 #if defined(DEBUG_FLUSH)
640 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
641 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
643 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
645 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
646 cpu_abort(env1
, "Internal error: code buffer overflow\n");
650 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
651 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
654 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
657 code_gen_ptr
= code_gen_buffer
;
658 /* XXX: flush processor icache at this point if cache flush is
663 #ifdef DEBUG_TB_CHECK
665 static void tb_invalidate_check(target_ulong address
)
667 TranslationBlock
*tb
;
669 address
&= TARGET_PAGE_MASK
;
670 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
671 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
672 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
673 address
>= tb
->pc
+ tb
->size
)) {
674 printf("ERROR invalidate: address=" TARGET_FMT_lx
675 " PC=%08lx size=%04x\n",
676 address
, (long)tb
->pc
, tb
->size
);
682 /* verify that all the pages have correct rights for code */
683 static void tb_page_check(void)
685 TranslationBlock
*tb
;
686 int i
, flags1
, flags2
;
688 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
689 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
690 flags1
= page_get_flags(tb
->pc
);
691 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
692 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
693 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
694 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
702 /* invalidate one TB */
703 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
706 TranslationBlock
*tb1
;
710 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
713 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
717 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
719 TranslationBlock
*tb1
;
725 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
727 *ptb
= tb1
->page_next
[n1
];
730 ptb
= &tb1
->page_next
[n1
];
734 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
736 TranslationBlock
*tb1
, **ptb
;
739 ptb
= &tb
->jmp_next
[n
];
742 /* find tb(n) in circular list */
746 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
747 if (n1
== n
&& tb1
== tb
)
750 ptb
= &tb1
->jmp_first
;
752 ptb
= &tb1
->jmp_next
[n1
];
755 /* now we can suppress tb(n) from the list */
756 *ptb
= tb
->jmp_next
[n
];
758 tb
->jmp_next
[n
] = NULL
;
762 /* reset the jump entry 'n' of a TB so that it is not chained to
764 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
766 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
769 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
774 target_phys_addr_t phys_pc
;
775 TranslationBlock
*tb1
, *tb2
;
777 /* remove the TB from the hash list */
778 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
779 h
= tb_phys_hash_func(phys_pc
);
780 tb_remove(&tb_phys_hash
[h
], tb
,
781 offsetof(TranslationBlock
, phys_hash_next
));
783 /* remove the TB from the page list */
784 if (tb
->page_addr
[0] != page_addr
) {
785 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
786 tb_page_remove(&p
->first_tb
, tb
);
787 invalidate_page_bitmap(p
);
789 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
790 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
791 tb_page_remove(&p
->first_tb
, tb
);
792 invalidate_page_bitmap(p
);
795 tb_invalidated_flag
= 1;
797 /* remove the TB from the hash list */
798 h
= tb_jmp_cache_hash_func(tb
->pc
);
799 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
800 if (env
->tb_jmp_cache
[h
] == tb
)
801 env
->tb_jmp_cache
[h
] = NULL
;
804 /* suppress this TB from the two jump lists */
805 tb_jmp_remove(tb
, 0);
806 tb_jmp_remove(tb
, 1);
808 /* suppress any remaining jumps to this TB */
814 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
815 tb2
= tb1
->jmp_next
[n1
];
816 tb_reset_jump(tb1
, n1
);
817 tb1
->jmp_next
[n1
] = NULL
;
820 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
822 tb_phys_invalidate_count
++;
825 static inline void set_bits(uint8_t *tab
, int start
, int len
)
831 mask
= 0xff << (start
& 7);
832 if ((start
& ~7) == (end
& ~7)) {
834 mask
&= ~(0xff << (end
& 7));
839 start
= (start
+ 8) & ~7;
841 while (start
< end1
) {
846 mask
= ~(0xff << (end
& 7));
852 static void build_page_bitmap(PageDesc
*p
)
854 int n
, tb_start
, tb_end
;
855 TranslationBlock
*tb
;
857 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
862 tb
= (TranslationBlock
*)((long)tb
& ~3);
863 /* NOTE: this is subtle as a TB may span two physical pages */
865 /* NOTE: tb_end may be after the end of the page, but
866 it is not a problem */
867 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
868 tb_end
= tb_start
+ tb
->size
;
869 if (tb_end
> TARGET_PAGE_SIZE
)
870 tb_end
= TARGET_PAGE_SIZE
;
873 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
875 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
876 tb
= tb
->page_next
[n
];
880 TranslationBlock
*tb_gen_code(CPUState
*env
,
881 target_ulong pc
, target_ulong cs_base
,
882 int flags
, int cflags
)
884 TranslationBlock
*tb
;
886 target_ulong phys_pc
, phys_page2
, virt_page2
;
889 phys_pc
= get_phys_addr_code(env
, pc
);
892 /* flush must be done */
894 /* cannot fail at this point */
896 /* Don't forget to invalidate previous TB info. */
897 tb_invalidated_flag
= 1;
899 tc_ptr
= code_gen_ptr
;
901 tb
->cs_base
= cs_base
;
904 cpu_gen_code(env
, tb
, &code_gen_size
);
905 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
907 /* check next page if needed */
908 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
910 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
911 phys_page2
= get_phys_addr_code(env
, virt_page2
);
913 tb_link_phys(tb
, phys_pc
, phys_page2
);
917 /* invalidate all TBs which intersect with the target physical page
918 starting in range [start;end[. NOTE: start and end must refer to
919 the same physical page. 'is_cpu_write_access' should be true if called
920 from a real cpu write access: the virtual CPU will exit the current
921 TB if code is modified inside this TB. */
922 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
923 int is_cpu_write_access
)
925 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
926 CPUState
*env
= cpu_single_env
;
927 target_ulong tb_start
, tb_end
;
930 #ifdef TARGET_HAS_PRECISE_SMC
931 int current_tb_not_found
= is_cpu_write_access
;
932 TranslationBlock
*current_tb
= NULL
;
933 int current_tb_modified
= 0;
934 target_ulong current_pc
= 0;
935 target_ulong current_cs_base
= 0;
936 int current_flags
= 0;
937 #endif /* TARGET_HAS_PRECISE_SMC */
939 p
= page_find(start
>> TARGET_PAGE_BITS
);
942 if (!p
->code_bitmap
&&
943 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
944 is_cpu_write_access
) {
945 /* build code bitmap */
946 build_page_bitmap(p
);
949 /* we remove all the TBs in the range [start, end[ */
950 /* XXX: see if in some cases it could be faster to invalidate all the code */
954 tb
= (TranslationBlock
*)((long)tb
& ~3);
955 tb_next
= tb
->page_next
[n
];
956 /* NOTE: this is subtle as a TB may span two physical pages */
958 /* NOTE: tb_end may be after the end of the page, but
959 it is not a problem */
960 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
961 tb_end
= tb_start
+ tb
->size
;
963 tb_start
= tb
->page_addr
[1];
964 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
966 if (!(tb_end
<= start
|| tb_start
>= end
)) {
967 #ifdef TARGET_HAS_PRECISE_SMC
968 if (current_tb_not_found
) {
969 current_tb_not_found
= 0;
971 if (env
->mem_io_pc
) {
972 /* now we have a real cpu fault */
973 current_tb
= tb_find_pc(env
->mem_io_pc
);
976 if (current_tb
== tb
&&
977 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
978 /* If we are modifying the current TB, we must stop
979 its execution. We could be more precise by checking
980 that the modification is after the current PC, but it
981 would require a specialized function to partially
982 restore the CPU state */
984 current_tb_modified
= 1;
985 cpu_restore_state(current_tb
, env
,
986 env
->mem_io_pc
, NULL
);
987 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
990 #endif /* TARGET_HAS_PRECISE_SMC */
991 /* we need to do that to handle the case where a signal
992 occurs while doing tb_phys_invalidate() */
995 saved_tb
= env
->current_tb
;
996 env
->current_tb
= NULL
;
998 tb_phys_invalidate(tb
, -1);
1000 env
->current_tb
= saved_tb
;
1001 if (env
->interrupt_request
&& env
->current_tb
)
1002 cpu_interrupt(env
, env
->interrupt_request
);
1007 #if !defined(CONFIG_USER_ONLY)
1008 /* if no code remaining, no need to continue to use slow writes */
1010 invalidate_page_bitmap(p
);
1011 if (is_cpu_write_access
) {
1012 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1016 #ifdef TARGET_HAS_PRECISE_SMC
1017 if (current_tb_modified
) {
1018 /* we generate a block containing just the instruction
1019 modifying the memory. It will ensure that it cannot modify
1021 env
->current_tb
= NULL
;
1022 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1023 cpu_resume_from_signal(env
, NULL
);
1028 /* len must be <= 8 and start must be a multiple of len */
1029 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1035 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036 cpu_single_env
->mem_io_vaddr
, len
,
1037 cpu_single_env
->eip
,
1038 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1041 p
= page_find(start
>> TARGET_PAGE_BITS
);
1044 if (p
->code_bitmap
) {
1045 offset
= start
& ~TARGET_PAGE_MASK
;
1046 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1047 if (b
& ((1 << len
) - 1))
1051 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1055 #if !defined(CONFIG_SOFTMMU)
1056 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1057 unsigned long pc
, void *puc
)
1059 TranslationBlock
*tb
;
1062 #ifdef TARGET_HAS_PRECISE_SMC
1063 TranslationBlock
*current_tb
= NULL
;
1064 CPUState
*env
= cpu_single_env
;
1065 int current_tb_modified
= 0;
1066 target_ulong current_pc
= 0;
1067 target_ulong current_cs_base
= 0;
1068 int current_flags
= 0;
1071 addr
&= TARGET_PAGE_MASK
;
1072 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1076 #ifdef TARGET_HAS_PRECISE_SMC
1077 if (tb
&& pc
!= 0) {
1078 current_tb
= tb_find_pc(pc
);
1081 while (tb
!= NULL
) {
1083 tb
= (TranslationBlock
*)((long)tb
& ~3);
1084 #ifdef TARGET_HAS_PRECISE_SMC
1085 if (current_tb
== tb
&&
1086 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1087 /* If we are modifying the current TB, we must stop
1088 its execution. We could be more precise by checking
1089 that the modification is after the current PC, but it
1090 would require a specialized function to partially
1091 restore the CPU state */
1093 current_tb_modified
= 1;
1094 cpu_restore_state(current_tb
, env
, pc
, puc
);
1095 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1098 #endif /* TARGET_HAS_PRECISE_SMC */
1099 tb_phys_invalidate(tb
, addr
);
1100 tb
= tb
->page_next
[n
];
1103 #ifdef TARGET_HAS_PRECISE_SMC
1104 if (current_tb_modified
) {
1105 /* we generate a block containing just the instruction
1106 modifying the memory. It will ensure that it cannot modify
1108 env
->current_tb
= NULL
;
1109 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1110 cpu_resume_from_signal(env
, puc
);
1116 /* add the tb in the target page and protect it if necessary */
1117 static inline void tb_alloc_page(TranslationBlock
*tb
,
1118 unsigned int n
, target_ulong page_addr
)
1121 TranslationBlock
*last_first_tb
;
1123 tb
->page_addr
[n
] = page_addr
;
1124 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1125 tb
->page_next
[n
] = p
->first_tb
;
1126 last_first_tb
= p
->first_tb
;
1127 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1128 invalidate_page_bitmap(p
);
1130 #if defined(TARGET_HAS_SMC) || 1
1132 #if defined(CONFIG_USER_ONLY)
1133 if (p
->flags
& PAGE_WRITE
) {
1138 /* force the host page as non writable (writes will have a
1139 page fault + mprotect overhead) */
1140 page_addr
&= qemu_host_page_mask
;
1142 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1143 addr
+= TARGET_PAGE_SIZE
) {
1145 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1149 p2
->flags
&= ~PAGE_WRITE
;
1150 page_get_flags(addr
);
1152 mprotect(g2h(page_addr
), qemu_host_page_size
,
1153 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1154 #ifdef DEBUG_TB_INVALIDATE
1155 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1160 /* if some code is already present, then the pages are already
1161 protected. So we handle the case where only the first TB is
1162 allocated in a physical page */
1163 if (!last_first_tb
) {
1164 tlb_protect_code(page_addr
);
1168 #endif /* TARGET_HAS_SMC */
1171 /* Allocate a new translation block. Flush the translation buffer if
1172 too many translation blocks or too much generated code. */
1173 TranslationBlock
*tb_alloc(target_ulong pc
)
1175 TranslationBlock
*tb
;
1177 if (nb_tbs
>= code_gen_max_blocks
||
1178 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1180 tb
= &tbs
[nb_tbs
++];
1186 void tb_free(TranslationBlock
*tb
)
1188 /* In practice this is mostly used for single use temporary TB
1189 Ignore the hard cases and just back up if this TB happens to
1190 be the last one generated. */
1191 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1192 code_gen_ptr
= tb
->tc_ptr
;
1197 /* add a new TB and link it to the physical page tables. phys_page2 is
1198 (-1) to indicate that only one page contains the TB. */
1199 void tb_link_phys(TranslationBlock
*tb
,
1200 target_ulong phys_pc
, target_ulong phys_page2
)
1203 TranslationBlock
**ptb
;
1205 /* Grab the mmap lock to stop another thread invalidating this TB
1206 before we are done. */
1208 /* add in the physical hash table */
1209 h
= tb_phys_hash_func(phys_pc
);
1210 ptb
= &tb_phys_hash
[h
];
1211 tb
->phys_hash_next
= *ptb
;
1214 /* add in the page list */
1215 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1216 if (phys_page2
!= -1)
1217 tb_alloc_page(tb
, 1, phys_page2
);
1219 tb
->page_addr
[1] = -1;
1221 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1222 tb
->jmp_next
[0] = NULL
;
1223 tb
->jmp_next
[1] = NULL
;
1225 /* init original jump addresses */
1226 if (tb
->tb_next_offset
[0] != 0xffff)
1227 tb_reset_jump(tb
, 0);
1228 if (tb
->tb_next_offset
[1] != 0xffff)
1229 tb_reset_jump(tb
, 1);
1231 #ifdef DEBUG_TB_CHECK
1237 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238 tb[1].tc_ptr. Return NULL if not found */
1239 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1241 int m_min
, m_max
, m
;
1243 TranslationBlock
*tb
;
1247 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1248 tc_ptr
>= (unsigned long)code_gen_ptr
)
1250 /* binary search (cf Knuth) */
1253 while (m_min
<= m_max
) {
1254 m
= (m_min
+ m_max
) >> 1;
1256 v
= (unsigned long)tb
->tc_ptr
;
1259 else if (tc_ptr
< v
) {
1268 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1270 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1272 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1275 tb1
= tb
->jmp_next
[n
];
1277 /* find head of list */
1280 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1283 tb1
= tb1
->jmp_next
[n1
];
1285 /* we are now sure now that tb jumps to tb1 */
1288 /* remove tb from the jmp_first list */
1289 ptb
= &tb_next
->jmp_first
;
1293 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1294 if (n1
== n
&& tb1
== tb
)
1296 ptb
= &tb1
->jmp_next
[n1
];
1298 *ptb
= tb
->jmp_next
[n
];
1299 tb
->jmp_next
[n
] = NULL
;
1301 /* suppress the jump to next tb in generated code */
1302 tb_reset_jump(tb
, n
);
1304 /* suppress jumps in the tb on which we could have jumped */
1305 tb_reset_jump_recursive(tb_next
);
1309 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1311 tb_reset_jump_recursive2(tb
, 0);
1312 tb_reset_jump_recursive2(tb
, 1);
1315 #if defined(TARGET_HAS_ICE)
1316 #if defined(CONFIG_USER_ONLY)
1317 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1319 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1322 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1324 target_phys_addr_t addr
;
1326 ram_addr_t ram_addr
;
1329 addr
= cpu_get_phys_page_debug(env
, pc
);
1330 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1332 pd
= IO_MEM_UNASSIGNED
;
1334 pd
= p
->phys_offset
;
1336 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1337 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1340 #endif /* TARGET_HAS_ICE */
1342 /* Add a watchpoint. */
1343 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1344 int flags
, CPUWatchpoint
**watchpoint
)
1346 target_ulong len_mask
= ~(len
- 1);
1349 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1350 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1351 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1352 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1355 wp
= qemu_malloc(sizeof(*wp
));
1358 wp
->len_mask
= len_mask
;
1361 /* keep all GDB-injected watchpoints in front */
1363 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1365 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1367 tlb_flush_page(env
, addr
);
1374 /* Remove a specific watchpoint. */
1375 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1378 target_ulong len_mask
= ~(len
- 1);
1381 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1382 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1383 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1384 cpu_watchpoint_remove_by_ref(env
, wp
);
1391 /* Remove a specific watchpoint by reference. */
1392 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1394 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1396 tlb_flush_page(env
, watchpoint
->vaddr
);
1398 qemu_free(watchpoint
);
1401 /* Remove all matching watchpoints. */
1402 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1404 CPUWatchpoint
*wp
, *next
;
1406 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1407 if (wp
->flags
& mask
)
1408 cpu_watchpoint_remove_by_ref(env
, wp
);
1412 /* Add a breakpoint. */
1413 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1414 CPUBreakpoint
**breakpoint
)
1416 #if defined(TARGET_HAS_ICE)
1419 bp
= qemu_malloc(sizeof(*bp
));
1424 /* keep all GDB-injected breakpoints in front */
1426 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1428 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1430 breakpoint_invalidate(env
, pc
);
1440 /* Remove a specific breakpoint. */
1441 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1443 #if defined(TARGET_HAS_ICE)
1446 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1447 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1448 cpu_breakpoint_remove_by_ref(env
, bp
);
1458 /* Remove a specific breakpoint by reference. */
1459 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1461 #if defined(TARGET_HAS_ICE)
1462 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1464 breakpoint_invalidate(env
, breakpoint
->pc
);
1466 qemu_free(breakpoint
);
1470 /* Remove all matching breakpoints. */
1471 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1473 #if defined(TARGET_HAS_ICE)
1474 CPUBreakpoint
*bp
, *next
;
1476 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1477 if (bp
->flags
& mask
)
1478 cpu_breakpoint_remove_by_ref(env
, bp
);
1483 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1484 CPU loop after each instruction */
1485 void cpu_single_step(CPUState
*env
, int enabled
)
1487 #if defined(TARGET_HAS_ICE)
1488 if (env
->singlestep_enabled
!= enabled
) {
1489 env
->singlestep_enabled
= enabled
;
1491 kvm_update_guest_debug(env
, 0);
1493 /* must flush all the translated code to avoid inconsistencies */
1494 /* XXX: only flush what is necessary */
1501 /* enable or disable low levels log */
1502 void cpu_set_log(int log_flags
)
1504 loglevel
= log_flags
;
1505 if (loglevel
&& !logfile
) {
1506 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1508 perror(logfilename
);
1511 #if !defined(CONFIG_SOFTMMU)
1512 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1514 static char logfile_buf
[4096];
1515 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1517 #elif !defined(_WIN32)
1518 /* Win32 doesn't support line-buffering and requires size >= 2 */
1519 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1523 if (!loglevel
&& logfile
) {
1529 void cpu_set_log_filename(const char *filename
)
1531 logfilename
= strdup(filename
);
1536 cpu_set_log(loglevel
);
1539 static void cpu_unlink_tb(CPUState
*env
)
1541 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1542 problem and hope the cpu will stop of its own accord. For userspace
1543 emulation this often isn't actually as bad as it sounds. Often
1544 signals are used primarily to interrupt blocking syscalls. */
1545 TranslationBlock
*tb
;
1546 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1548 spin_lock(&interrupt_lock
);
1549 tb
= env
->current_tb
;
1550 /* if the cpu is currently executing code, we must unlink it and
1551 all the potentially executing TB */
1553 env
->current_tb
= NULL
;
1554 tb_reset_jump_recursive(tb
);
1556 spin_unlock(&interrupt_lock
);
1559 /* mask must never be zero, except for A20 change call */
1560 void cpu_interrupt(CPUState
*env
, int mask
)
1564 old_mask
= env
->interrupt_request
;
1565 env
->interrupt_request
|= mask
;
1567 #ifndef CONFIG_USER_ONLY
1569 * If called from iothread context, wake the target cpu in
1572 if (!qemu_cpu_self(env
)) {
1579 env
->icount_decr
.u16
.high
= 0xffff;
1580 #ifndef CONFIG_USER_ONLY
1582 && (mask
& ~old_mask
) != 0) {
1583 cpu_abort(env
, "Raised interrupt while not in I/O function");
1591 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1593 env
->interrupt_request
&= ~mask
;
1596 void cpu_exit(CPUState
*env
)
1598 env
->exit_request
= 1;
1602 const CPULogItem cpu_log_items
[] = {
1603 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1604 "show generated host assembly code for each compiled TB" },
1605 { CPU_LOG_TB_IN_ASM
, "in_asm",
1606 "show target assembly code for each compiled TB" },
1607 { CPU_LOG_TB_OP
, "op",
1608 "show micro ops for each compiled TB" },
1609 { CPU_LOG_TB_OP_OPT
, "op_opt",
1612 "before eflags optimization and "
1614 "after liveness analysis" },
1615 { CPU_LOG_INT
, "int",
1616 "show interrupts/exceptions in short format" },
1617 { CPU_LOG_EXEC
, "exec",
1618 "show trace before each executed TB (lots of logs)" },
1619 { CPU_LOG_TB_CPU
, "cpu",
1620 "show CPU state before block translation" },
1622 { CPU_LOG_PCALL
, "pcall",
1623 "show protected mode far calls/returns/exceptions" },
1624 { CPU_LOG_RESET
, "cpu_reset",
1625 "show CPU state before CPU resets" },
1628 { CPU_LOG_IOPORT
, "ioport",
1629 "show all i/o ports accesses" },
1634 #ifndef CONFIG_USER_ONLY
1635 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1636 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1638 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1640 ram_addr_t phys_offset
)
1642 CPUPhysMemoryClient
*client
;
1643 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1644 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1648 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1649 target_phys_addr_t end
)
1651 CPUPhysMemoryClient
*client
;
1652 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1653 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1660 static int cpu_notify_migration_log(int enable
)
1662 CPUPhysMemoryClient
*client
;
1663 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1664 int r
= client
->migration_log(client
, enable
);
1671 static void phys_page_for_each_in_l1_map(PhysPageDesc
**phys_map
,
1672 CPUPhysMemoryClient
*client
)
1677 for (l1
= 0; l1
< L1_SIZE
; ++l1
) {
1682 for (l2
= 0; l2
< L2_SIZE
; ++l2
) {
1683 if (pd
[l2
].phys_offset
== IO_MEM_UNASSIGNED
) {
1686 client
->set_memory(client
, pd
[l2
].region_offset
,
1687 TARGET_PAGE_SIZE
, pd
[l2
].phys_offset
);
1692 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1694 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
1696 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1697 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1699 void **phys_map
= (void **)l1_phys_map
;
1704 for (l1
= 0; l1
< L1_SIZE
; ++l1
) {
1706 phys_page_for_each_in_l1_map(phys_map
[l1
], client
);
1713 phys_page_for_each_in_l1_map(l1_phys_map
, client
);
1717 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1719 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1720 phys_page_for_each(client
);
1723 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1725 QLIST_REMOVE(client
, list
);
1729 static int cmp1(const char *s1
, int n
, const char *s2
)
1731 if (strlen(s2
) != n
)
1733 return memcmp(s1
, s2
, n
) == 0;
1736 /* takes a comma separated list of log masks. Return 0 if error. */
1737 int cpu_str_to_log_mask(const char *str
)
1739 const CPULogItem
*item
;
1746 p1
= strchr(p
, ',');
1749 if(cmp1(p
,p1
-p
,"all")) {
1750 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1754 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1755 if (cmp1(p
, p1
- p
, item
->name
))
1769 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1776 fprintf(stderr
, "qemu: fatal: ");
1777 vfprintf(stderr
, fmt
, ap
);
1778 fprintf(stderr
, "\n");
1780 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1782 cpu_dump_state(env
, stderr
, fprintf
, 0);
1784 if (qemu_log_enabled()) {
1785 qemu_log("qemu: fatal: ");
1786 qemu_log_vprintf(fmt
, ap2
);
1789 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1791 log_cpu_state(env
, 0);
1798 #if defined(CONFIG_USER_ONLY)
1800 struct sigaction act
;
1801 sigfillset(&act
.sa_mask
);
1802 act
.sa_handler
= SIG_DFL
;
1803 sigaction(SIGABRT
, &act
, NULL
);
1809 CPUState
*cpu_copy(CPUState
*env
)
1811 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1812 CPUState
*next_cpu
= new_env
->next_cpu
;
1813 int cpu_index
= new_env
->cpu_index
;
1814 #if defined(TARGET_HAS_ICE)
1819 memcpy(new_env
, env
, sizeof(CPUState
));
1821 /* Preserve chaining and index. */
1822 new_env
->next_cpu
= next_cpu
;
1823 new_env
->cpu_index
= cpu_index
;
1825 /* Clone all break/watchpoints.
1826 Note: Once we support ptrace with hw-debug register access, make sure
1827 BP_CPU break/watchpoints are handled correctly on clone. */
1828 QTAILQ_INIT(&env
->breakpoints
);
1829 QTAILQ_INIT(&env
->watchpoints
);
1830 #if defined(TARGET_HAS_ICE)
1831 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1832 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1834 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1835 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1843 #if !defined(CONFIG_USER_ONLY)
1845 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1849 /* Discard jump cache entries for any tb which might potentially
1850 overlap the flushed page. */
1851 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1852 memset (&env
->tb_jmp_cache
[i
], 0,
1853 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1855 i
= tb_jmp_cache_hash_page(addr
);
1856 memset (&env
->tb_jmp_cache
[i
], 0,
1857 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1860 static CPUTLBEntry s_cputlb_empty_entry
= {
1867 /* NOTE: if flush_global is true, also flush global entries (not
1869 void tlb_flush(CPUState
*env
, int flush_global
)
1873 #if defined(DEBUG_TLB)
1874 printf("tlb_flush:\n");
1876 /* must reset current TB so that interrupts cannot modify the
1877 links while we are modifying them */
1878 env
->current_tb
= NULL
;
1880 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1882 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1883 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1887 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1892 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1894 if (addr
== (tlb_entry
->addr_read
&
1895 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1896 addr
== (tlb_entry
->addr_write
&
1897 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1898 addr
== (tlb_entry
->addr_code
&
1899 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1900 *tlb_entry
= s_cputlb_empty_entry
;
1904 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1909 #if defined(DEBUG_TLB)
1910 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1912 /* must reset current TB so that interrupts cannot modify the
1913 links while we are modifying them */
1914 env
->current_tb
= NULL
;
1916 addr
&= TARGET_PAGE_MASK
;
1917 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1918 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1919 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1921 tlb_flush_jmp_cache(env
, addr
);
1924 /* update the TLBs so that writes to code in the virtual page 'addr'
1926 static void tlb_protect_code(ram_addr_t ram_addr
)
1928 cpu_physical_memory_reset_dirty(ram_addr
,
1929 ram_addr
+ TARGET_PAGE_SIZE
,
1933 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1934 tested for self modifying code */
1935 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1938 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1941 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1942 unsigned long start
, unsigned long length
)
1945 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1946 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1947 if ((addr
- start
) < length
) {
1948 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1953 /* Note: start and end must be within the same ram block. */
1954 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1958 unsigned long length
, start1
;
1962 start
&= TARGET_PAGE_MASK
;
1963 end
= TARGET_PAGE_ALIGN(end
);
1965 length
= end
- start
;
1968 len
= length
>> TARGET_PAGE_BITS
;
1969 mask
= ~dirty_flags
;
1970 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1971 for(i
= 0; i
< len
; i
++)
1974 /* we modify the TLB cache so that the dirty bit will be set again
1975 when accessing the range */
1976 start1
= (unsigned long)qemu_get_ram_ptr(start
);
1977 /* Chek that we don't span multiple blocks - this breaks the
1978 address comparisons below. */
1979 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
1980 != (end
- 1) - start
) {
1984 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1986 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1987 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1988 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
1994 int cpu_physical_memory_set_dirty_tracking(int enable
)
1997 in_migration
= enable
;
1998 ret
= cpu_notify_migration_log(!!enable
);
2002 int cpu_physical_memory_get_dirty_tracking(void)
2004 return in_migration
;
2007 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2008 target_phys_addr_t end_addr
)
2012 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2016 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2018 ram_addr_t ram_addr
;
2021 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2022 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2023 + tlb_entry
->addend
);
2024 ram_addr
= qemu_ram_addr_from_host(p
);
2025 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2026 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2031 /* update the TLB according to the current state of the dirty bits */
2032 void cpu_tlb_update_dirty(CPUState
*env
)
2036 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2037 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2038 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2042 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2044 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2045 tlb_entry
->addr_write
= vaddr
;
2048 /* update the TLB corresponding to virtual page vaddr
2049 so that it is no longer dirty */
2050 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2055 vaddr
&= TARGET_PAGE_MASK
;
2056 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2057 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2058 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2061 /* add a new TLB entry. At most one entry for a given virtual address
2062 is permitted. Return 0 if OK or 2 if the page could not be mapped
2063 (can only happen in non SOFTMMU mode for I/O pages or pages
2064 conflicting with the host address space). */
2065 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2066 target_phys_addr_t paddr
, int prot
,
2067 int mmu_idx
, int is_softmmu
)
2072 target_ulong address
;
2073 target_ulong code_address
;
2074 target_phys_addr_t addend
;
2078 target_phys_addr_t iotlb
;
2080 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2082 pd
= IO_MEM_UNASSIGNED
;
2084 pd
= p
->phys_offset
;
2086 #if defined(DEBUG_TLB)
2087 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2088 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2093 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2094 /* IO memory case (romd handled later) */
2095 address
|= TLB_MMIO
;
2097 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2098 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2100 iotlb
= pd
& TARGET_PAGE_MASK
;
2101 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2102 iotlb
|= IO_MEM_NOTDIRTY
;
2104 iotlb
|= IO_MEM_ROM
;
2106 /* IO handlers are currently passed a physical address.
2107 It would be nice to pass an offset from the base address
2108 of that region. This would avoid having to special case RAM,
2109 and avoid full address decoding in every device.
2110 We can't use the high bits of pd for this because
2111 IO_MEM_ROMD uses these as a ram address. */
2112 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2114 iotlb
+= p
->region_offset
;
2120 code_address
= address
;
2121 /* Make accesses to pages with watchpoints go via the
2122 watchpoint trap routines. */
2123 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2124 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2125 iotlb
= io_mem_watch
+ paddr
;
2126 /* TODO: The memory case can be optimized by not trapping
2127 reads of pages with a write breakpoint. */
2128 address
|= TLB_MMIO
;
2132 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2133 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2134 te
= &env
->tlb_table
[mmu_idx
][index
];
2135 te
->addend
= addend
- vaddr
;
2136 if (prot
& PAGE_READ
) {
2137 te
->addr_read
= address
;
2142 if (prot
& PAGE_EXEC
) {
2143 te
->addr_code
= code_address
;
2147 if (prot
& PAGE_WRITE
) {
2148 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2149 (pd
& IO_MEM_ROMD
)) {
2150 /* Write access calls the I/O callback. */
2151 te
->addr_write
= address
| TLB_MMIO
;
2152 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2153 !cpu_physical_memory_is_dirty(pd
)) {
2154 te
->addr_write
= address
| TLB_NOTDIRTY
;
2156 te
->addr_write
= address
;
2159 te
->addr_write
= -1;
2166 void tlb_flush(CPUState
*env
, int flush_global
)
2170 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2174 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2175 target_phys_addr_t paddr
, int prot
,
2176 int mmu_idx
, int is_softmmu
)
2182 * Walks guest process memory "regions" one by one
2183 * and calls callback function 'fn' for each region.
2185 int walk_memory_regions(void *priv
,
2186 int (*fn
)(void *, unsigned long, unsigned long, unsigned long))
2188 unsigned long start
, end
;
2190 int i
, j
, prot
, prot1
;
2196 for (i
= 0; i
<= L1_SIZE
; i
++) {
2197 p
= (i
< L1_SIZE
) ? l1_map
[i
] : NULL
;
2198 for (j
= 0; j
< L2_SIZE
; j
++) {
2199 prot1
= (p
== NULL
) ? 0 : p
[j
].flags
;
2201 * "region" is one continuous chunk of memory
2202 * that has same protection flags set.
2204 if (prot1
!= prot
) {
2205 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2207 rc
= (*fn
)(priv
, start
, end
, prot
);
2208 /* callback can stop iteration by returning != 0 */
2225 static int dump_region(void *priv
, unsigned long start
,
2226 unsigned long end
, unsigned long prot
)
2228 FILE *f
= (FILE *)priv
;
2230 (void) fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2231 start
, end
, end
- start
,
2232 ((prot
& PAGE_READ
) ? 'r' : '-'),
2233 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2234 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2239 /* dump memory mappings */
2240 void page_dump(FILE *f
)
2242 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2243 "start", "end", "size", "prot");
2244 walk_memory_regions(f
, dump_region
);
2247 int page_get_flags(target_ulong address
)
2251 p
= page_find(address
>> TARGET_PAGE_BITS
);
2257 /* modify the flags of a page and invalidate the code if
2258 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2259 depending on PAGE_WRITE */
2260 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2265 /* mmap_lock should already be held. */
2266 start
= start
& TARGET_PAGE_MASK
;
2267 end
= TARGET_PAGE_ALIGN(end
);
2268 if (flags
& PAGE_WRITE
)
2269 flags
|= PAGE_WRITE_ORG
;
2270 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2271 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2272 /* We may be called for host regions that are outside guest
2276 /* if the write protection is set, then we invalidate the code
2278 if (!(p
->flags
& PAGE_WRITE
) &&
2279 (flags
& PAGE_WRITE
) &&
2281 tb_invalidate_phys_page(addr
, 0, NULL
);
2287 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2293 if (start
+ len
< start
)
2294 /* we've wrapped around */
2297 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2298 start
= start
& TARGET_PAGE_MASK
;
2300 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2301 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2304 if( !(p
->flags
& PAGE_VALID
) )
2307 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2309 if (flags
& PAGE_WRITE
) {
2310 if (!(p
->flags
& PAGE_WRITE_ORG
))
2312 /* unprotect the page if it was put read-only because it
2313 contains translated code */
2314 if (!(p
->flags
& PAGE_WRITE
)) {
2315 if (!page_unprotect(addr
, 0, NULL
))
2324 /* called from signal handler: invalidate the code and unprotect the
2325 page. Return TRUE if the fault was successfully handled. */
2326 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2328 unsigned int page_index
, prot
, pindex
;
2330 target_ulong host_start
, host_end
, addr
;
2332 /* Technically this isn't safe inside a signal handler. However we
2333 know this only ever happens in a synchronous SEGV handler, so in
2334 practice it seems to be ok. */
2337 host_start
= address
& qemu_host_page_mask
;
2338 page_index
= host_start
>> TARGET_PAGE_BITS
;
2339 p1
= page_find(page_index
);
2344 host_end
= host_start
+ qemu_host_page_size
;
2347 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2351 /* if the page was really writable, then we change its
2352 protection back to writable */
2353 if (prot
& PAGE_WRITE_ORG
) {
2354 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2355 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2356 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2357 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2358 p1
[pindex
].flags
|= PAGE_WRITE
;
2359 /* and since the content will be modified, we must invalidate
2360 the corresponding translated code. */
2361 tb_invalidate_phys_page(address
, pc
, puc
);
2362 #ifdef DEBUG_TB_CHECK
2363 tb_invalidate_check(address
);
2373 static inline void tlb_set_dirty(CPUState
*env
,
2374 unsigned long addr
, target_ulong vaddr
)
2377 #endif /* defined(CONFIG_USER_ONLY) */
2379 #if !defined(CONFIG_USER_ONLY)
2381 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2382 ram_addr_t memory
, ram_addr_t region_offset
);
2383 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2384 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2385 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2388 if (addr > start_addr) \
2391 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2392 if (start_addr2 > 0) \
2396 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2397 end_addr2 = TARGET_PAGE_SIZE - 1; \
2399 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2400 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

#if defined(TARGET_S390X) && defined(CONFIG_KVM)
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
    new_block->host = mmap((void*)0x1000000, size,
                           PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
    new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
    madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
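/* Usage sketch (kept out of the build): the usual board-level idiom for
   creating guest RAM is to allocate a block and then map it into the
   guest physical address space through the cpu_register_physical_memory()
   wrapper, which calls cpu_register_physical_memory_offset() with a zero
   region_offset.  The base address and size below are placeholders. */
#if 0
static void board_init_ram(void)
{
    const ram_addr_t ram_size = 64 * 1024 * 1024;
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    /* IO_MEM_RAM is zero; OR-ing it in simply documents the intent. */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
#endif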
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
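/* Usage sketch (kept out of the build): watchpoints are installed with
   cpu_watchpoint_insert() (defined earlier in this file); the TLB code
   then routes accesses to the watched page through io_mem_watch, so
   check_watchpoint() runs before each access.  The wrapper below is
   hypothetical. */
#if 0
static void debug_watch_word_writes(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* Watch one aligned 32-bit word for writes, reported to gdb. */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif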
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
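/* Usage sketch (kept out of the build): registering a device MMIO
   region.  The device state, callbacks and register-file size are
   hypothetical; the cpu_register_io_memory() and
   cpu_register_physical_memory() calls are the real API.  Leaving a
   width NULL makes cpu_register_io_memory_fixed() above flag the region
   IO_MEM_SUBWIDTH so registration goes through the subpage machinery. */
#if 0
typedef struct {
    uint32_t regs[64];
} MyDevState;

static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->regs[addr >> 2];
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;
}

static CPUReadMemoryFunc * const my_dev_read[3] = {
    NULL, NULL, my_dev_readl,      /* 32-bit accesses only */
};
static CPUWriteMemoryFunc * const my_dev_write[3] = {
    NULL, NULL, my_dev_writel,
};

static void my_dev_map(MyDevState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
    cpu_register_physical_memory(base, sizeof(s->regs), io);
}
#endif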
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
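/* Usage sketch (kept out of the build): the intended map/use/unmap
   pattern for zero-copy DMA, including the bounce-buffer retry path via
   the map-client callback.  The device function and its callback are
   hypothetical; cpu_physical_memory_map()/unmap() and
   cpu_register_map_client() are the real API. */
#if 0
static void dma_retry_cb(void *opaque); /* would re-enter the transfer */

static void dma_transfer(target_phys_addr_t addr, target_phys_addr_t len,
                         int is_write, void *opaque)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, is_write);

    if (!buf) {
        /* The single bounce buffer is busy: ask to be called back when
           retrying the map is likely to succeed. */
        cpu_register_map_client(opaque, dma_retry_cb);
        return;
    }
    /* The device would read or fill buf[0..plen) here; plen may be
       smaller than len, in which case the caller loops. */
    cpu_physical_memory_unmap(buf, plen, is_write, plen);
}
#endif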
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
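/* Usage sketch (kept out of the build): the _notdirty stores are meant
   for target MMU helpers that rewrite guest page-table entries, where
   the dirty bitmap is itself being used to track PTE modification and
   therefore must not be set by the store.  PTE_ACCESSED is a
   hypothetical target-specific flag. */
#if 0
static void set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
}
#endif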
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif