/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
uint8_t *phys_ram_dirty;
static int in_migration;
typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* In system mode we want L1_MAP to be based on physical addresses,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
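/* Worked example of the macros above (illustrative values, not from the
   original source): assume a 32-bit guest address space with
   TARGET_PAGE_BITS = 12 (4 KiB pages) and L2_BITS = 10.  Then
   32 - 12 = 20 index bits remain above the page offset;
   V_L1_BITS_REM = 20 % 10 = 0, which is < 4, so the remainder is folded
   into the first level: V_L1_BITS = 10, V_L1_SIZE = 1024 and
   V_L1_SHIFT = 32 - 12 - 10 = 10.  A lookup then uses address bits
   [31:22] as the L1 index and bits [21:12] as the single L2 index. */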
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
    }
#endif
}
static PageDesc *page_find_alloc(target_ulong index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(target_ulong index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
*qemu_get_cpu(int cpu
)
578 CPUState
*env
= first_cpu
;
581 if (env
->cpu_index
== cpu
)
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
*p
)
879 int n
, tb_start
, tb_end
;
880 TranslationBlock
*tb
;
882 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
887 tb
= (TranslationBlock
*)((long)tb
& ~3);
888 /* NOTE: this is subtle as a TB may span two physical pages */
890 /* NOTE: tb_end may be after the end of the page, but
891 it is not a problem */
892 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
893 tb_end
= tb_start
+ tb
->size
;
894 if (tb_end
> TARGET_PAGE_SIZE
)
895 tb_end
= TARGET_PAGE_SIZE
;
898 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
900 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
901 tb
= tb
->page_next
[n
];
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
1223 (-1) to indicate that only one page contains the TB. */
1224 void tb_link_phys(TranslationBlock
*tb
,
1225 target_ulong phys_pc
, target_ulong phys_page2
)
1228 TranslationBlock
**ptb
;
1230 /* Grab the mmap lock to stop another thread invalidating this TB
1231 before we are done. */
1233 /* add in the physical hash table */
1234 h
= tb_phys_hash_func(phys_pc
);
1235 ptb
= &tb_phys_hash
[h
];
1236 tb
->phys_hash_next
= *ptb
;
1239 /* add in the page list */
1240 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1241 if (phys_page2
!= -1)
1242 tb_alloc_page(tb
, 1, phys_page2
);
1244 tb
->page_addr
[1] = -1;
1246 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1247 tb
->jmp_next
[0] = NULL
;
1248 tb
->jmp_next
[1] = NULL
;
1250 /* init original jump addresses */
1251 if (tb
->tb_next_offset
[0] != 0xffff)
1252 tb_reset_jump(tb
, 0);
1253 if (tb
->tb_next_offset
[1] != 0xffff)
1254 tb_reset_jump(tb
, 1);
1256 #ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
*client
,
1710 int level
, void **lp
)
1718 PhysPageDesc
*pd
= *lp
;
1719 for (i
= 0; i
< L2_BITS
; ++i
) {
1720 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1721 client
->set_memory(client
, pd
[i
].region_offset
,
1722 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1727 for (i
= 0; i
< L2_BITS
; ++i
) {
1728 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1733 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1736 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1737 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1742 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1744 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1745 phys_page_for_each(client
);
1748 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1750 QLIST_REMOVE(client
, list
);
1754 static int cmp1(const char *s1
, int n
, const char *s2
)
1756 if (strlen(s2
) != n
)
1758 return memcmp(s1
, s2
, n
) == 0;
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1870 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1874 /* Discard jump cache entries for any tb which might potentially
1875 overlap the flushed page. */
1876 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1877 memset (&env
->tb_jmp_cache
[i
], 0,
1878 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1880 i
= tb_jmp_cache_hash_page(addr
);
1881 memset (&env
->tb_jmp_cache
[i
], 0,
1882 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1885 static CPUTLBEntry s_cputlb_empty_entry
= {
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}
*tlb_entry
, target_ulong addr
)
1919 if (addr
== (tlb_entry
->addr_read
&
1920 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1921 addr
== (tlb_entry
->addr_write
&
1922 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1923 addr
== (tlb_entry
->addr_code
&
1924 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1925 *tlb_entry
= s_cputlb_empty_entry
;
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
)
2022 in_migration
= enable
;
2023 ret
= cpu_notify_migration_log(!!enable
);
2027 int cpu_physical_memory_get_dirty_tracking(void)
2029 return in_migration
;
2032 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2033 target_phys_addr_t end_addr
)
2037 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   unsigned long end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 unsigned long base, int level, void **lp)
{
    unsigned long pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | (i << (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, unsigned long start,
    unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr;
    PageDesc *p;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
2386 page. Return TRUE if the fault was successfully handled. */
2387 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2389 unsigned int page_index
, prot
, pindex
;
2391 target_ulong host_start
, host_end
, addr
;
2393 /* Technically this isn't safe inside a signal handler. However we
2394 know this only ever happens in a synchronous SEGV handler, so in
2395 practice it seems to be ok. */
2398 host_start
= address
& qemu_host_page_mask
;
2399 page_index
= host_start
>> TARGET_PAGE_BITS
;
2400 p1
= page_find(page_index
);
2405 host_end
= host_start
+ qemu_host_page_size
;
2408 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2412 /* if the page was really writable, then we change its
2413 protection back to writable */
2414 if (prot
& PAGE_WRITE_ORG
) {
2415 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2416 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2417 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2418 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2419 p1
[pindex
].flags
|= PAGE_WRITE
;
2420 /* and since the content will be modified, we must invalidate
2421 the corresponding translated code. */
2422 tb_invalidate_phys_page(address
, pc
, puc
);
2423 #ifdef DEBUG_TB_CHECK
2424 tb_invalidate_check(address
);
2434 static inline void tlb_set_dirty(CPUState
*env
,
2435 unsigned long addr
, target_ulong vaddr
)
2438 #endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2455 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2458 if (addr > start_addr) \
2461 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2462 if (start_addr2 > 0) \
2466 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2467 end_addr2 = TARGET_PAGE_SIZE - 1; \
2469 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2470 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
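/* A minimal usage sketch of the registration API above (not part of this
   file).  It assumes cpu_register_physical_memory(), the convenience
   wrapper over cpu_register_physical_memory_offset() declared in
   cpu-common.h, a RAM offset from qemu_ram_alloc() (defined later in this
   file), and an io_index from cpu_register_io_memory().  The board name,
   addresses and sizes are hypothetical. */
#if 0
static void example_board_init(int example_io_index)
{
    /* 64 MB of RAM at physical address 0: the low bits of phys_offset
       are IO_MEM_RAM, so accesses go straight to host memory. */
    ram_addr_t ram_offset = qemu_ram_alloc(64 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 64 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* One page of MMIO: the low bits of phys_offset carry the io_index,
       so accesses are dispatched through io_mem_read/io_mem_write. */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 example_io_index);
}
#endif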
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
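/* file_ram_alloc() is only reached when the user passes -mem-path on the
   command line, e.g. (illustrative invocation, assuming a hugetlbfs mount
   at /dev/hugepages):

       qemu -m 1024 -mem-path /dev/hugepages ...

   Guest RAM is then backed by hugepages, which can reduce host TLB
   pressure for large guests. */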
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    block = ram_blocks;
    prev = NULL;
    prevp = &ram_blocks;

    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
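/* Usage sketch for the RAM block API (not part of this file): a
   hypothetical display device allocates VRAM it owns, registers it, and
   keeps a host pointer for its own rendering.  This is the "local memory"
   case the comment above allows; guest-driven DMA should still go through
   cpu_physical_memory_map()/cpu_physical_memory_rw().  Names, base
   address and size are illustrative assumptions. */
#if 0
#define EXAMPLE_VRAM_SIZE (4 * 1024 * 1024)   /* hypothetical */
#define EXAMPLE_VRAM_BASE 0x20000000          /* hypothetical */

static uint8_t *example_vram_init(void)
{
    ram_addr_t vram_offset = qemu_ram_alloc(EXAMPLE_VRAM_SIZE);
    cpu_register_physical_memory(EXAMPLE_VRAM_BASE, EXAMPLE_VRAM_SIZE,
                                 vram_offset | IO_MEM_RAM);
    /* The device owns this block and never reads past its end, so
       holding on to the host pointer is acceptable here. */
    return qemu_get_ram_ptr(vram_offset);
}
#endif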
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
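/* Sketch of how the watchpoint machinery above is driven (not part of
   this file).  It assumes cpu_watchpoint_insert() as defined earlier in
   this file; the address, length and flag values are illustrative.  Once
   inserted, TLB fills route the watched page through io_mem_watch, so
   every access lands in check_watchpoint() above. */
#if 0
static void example_watch_guest_addr(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    /* Watch 4 bytes for writes; a hit raises EXCP_DEBUG. */
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif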
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
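/* Illustrative note (drawn from cpu_register_physical_memory_offset()
   above): subpages come into play whenever a registered region does not
   cover a whole target page, e.g. a small device window.  A sketch, with
   a hypothetical io_index and address: */
#if 0
/* Registering only 0x100 bytes forces the containing page to be split
   into a subpage; the remaining bytes of the page keep their previous
   handlers (here: unassigned). */
cpu_register_physical_memory(0x10001000, 0x100, example_small_io_index);
#endif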
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
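/* Usage sketch for cpu_register_io_memory() (not part of this file): a
   hypothetical device exposes dword handlers, leaves the narrower widths
   NULL (which, per cpu_register_io_memory_fixed() above, marks the region
   IO_MEM_SUBWIDTH), and wires the returned io_index into the physical
   address space via cpu_register_physical_memory() from cpu-common.h.
   All "example_*" names and the address are assumptions. */
#if 0
static uint32_t example_io_readl(void *opaque, target_phys_addr_t addr)
{
    /* addr is the offset within the registered region (plus any
       region_offset), not an absolute physical address. */
    return 0;
}

static void example_io_writel(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* react to the guest write here */
}

static CPUReadMemoryFunc * const example_io_read[3] = {
    NULL,               /* byte */
    NULL,               /* word */
    example_io_readl,   /* dword */
};

static CPUWriteMemoryFunc * const example_io_write[3] = {
    NULL,
    NULL,
    example_io_writel,
};

static void example_io_init(void)
{
    int io_index = cpu_register_io_memory(example_io_read,
                                          example_io_write, NULL);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io_index);
}
#endif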
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
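/* Usage sketch of the map/unmap pair (not part of this file): a zero-copy
   DMA write into guest memory.  If the region is not plain RAM, the map
   may come back shorter than requested or NULL (there is a single bounce
   buffer), so both cases must be handled.  "example_fill_buffer" is a
   hypothetical data producer. */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr,
                              target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    example_fill_buffer(host, plen);   /* hypothetical producer */
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif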
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
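/* Usage sketch for the helpers above (not part of this file): updating a
   guest page-table entry in physical memory, the pattern the notdirty
   variants exist for.  pte_addr and the accessed bit are illustrative. */
#if 0
static void example_touch_pte(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);   /* read a guest PTE */
    if (!(pte & 1)) {                    /* illustrative "accessed" bit */
        /* set the bit without marking the RAM page dirty, so dirty
           tracking keeps reflecting guest-visible writes only */
        stl_phys_notdirty(pte_addr, pte | 1);
    }
}
#endif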
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif