/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)

uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break. */
ram_addr_t last_ram_offset;
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
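
/*
 * Illustrative sketch (not part of the original code): how a target page
 * index is decomposed with the macros above when walking the virtual
 * address map.  The helper name and the two-level simplification are
 * assumptions; the real walk in page_find_alloc() below handles an
 * arbitrary number of intermediate levels.
 */
static inline void example_split_page_index(tb_page_addr_t index,
                                            unsigned long *l1_idx,
                                            unsigned long *l2_idx)
{
    /* the top V_L1_BITS of the page index select the l1_map slot */
    *l1_idx = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
    /* the lowest L2_BITS select the PageDesc inside the last-level table */
    *l2_idx = index & (L2_SIZE - 1);
}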
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
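
/*
 * Illustrative usage (not from the original file): making a small host
 * buffer executable before emitting code into it.  The buffer name and
 * size below are arbitrary example values.
 */
static uint8_t example_stub_buffer[128];

static void example_make_stub_executable(void)
{
    map_exec(example_stub_buffer, sizeof(example_stub_buffer));
}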
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Intermediate levels.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Intermediate levels.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
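
/*
 * Illustrative usage (not from the original file): a front-end calls
 * cpu_exec_init_all() once at start-up, before creating any CPUState.
 * The 32 MB figure below is an arbitrary example value; passing 0
 * selects the default buffer size.
 */
static void example_init_translator(void)
{
    cpu_exec_init_all(32 * 1024 * 1024);
}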
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
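
/*
 * Illustrative sketch (not part of the original code): how the SMC code
 * bitmap built below is meant to be queried.  Testing a byte offset inside
 * a page is plain bit arithmetic on the uint8_t array; the bitmap size
 * mirrors the TARGET_PAGE_SIZE / 8 allocation in build_page_bitmap().
 * The helper name is an assumption.
 */
static inline int example_code_bitmap_test(const uint8_t *bitmap, int offset)
{
    /* non-zero if the byte at 'offset' within the page was marked as code */
    return bitmap[offset >> 3] & (1 << (offset & 7));
}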
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
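
/*
 * Illustrative sketch (not part of the original code): a fault handler that
 * knows the host PC of faulting generated code can map it back to the
 * TranslationBlock that owns it.  The helper name is an assumption.
 */
static inline TranslationBlock *example_tb_from_host_pc(unsigned long host_pc)
{
    /* NULL if host_pc does not point into the translation cache */
    return tb_find_pc(host_pc);
}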
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
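
/*
 * Illustrative usage (not from the original file): a front-end might route
 * logging to a custom file and then enable a couple of log classes.  The
 * path and the flag combination below are arbitrary example values.
 */
static void example_enable_tb_logging(void)
{
    cpu_set_log_filename("/tmp/qemu-example.log");
    cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC);
}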
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
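
/*
 * Illustrative usage (not from the original file): parsing a user-supplied
 * "-d" style option string into a log mask.  The option string below is an
 * arbitrary example.
 */
static int example_parse_log_option(void)
{
    int mask = cpu_str_to_log_mask("in_asm,exec");
    if (mask == 0) {
        fprintf(stderr, "unknown log item in option string\n");
    }
    return mask;
}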
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
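
/*
 * Illustrative usage (not from the original file): a display device that
 * just scanned its frame buffer could clear the dirty flag for that RAM
 * range so future guest writes are trapped again.  The range is an
 * arbitrary example and VGA_DIRTY_FLAG is assumed to be the flag such
 * devices use elsewhere in QEMU.
 */
static void example_reset_framebuffer_dirty(ram_addr_t fb_start,
                                            ram_addr_t fb_size)
{
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size,
                                    VGA_DIRTY_FLAG);
}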
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   unsigned long end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 unsigned long base, int level, void **lp)
{
    unsigned long pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | (i << (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, unsigned long start,
                       unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
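
/*
 * Illustrative usage (not from the original file): dumping the current
 * guest mappings to stderr, e.g. from a debugging hook.
 */
static void example_dump_guest_mappings(void)
{
    page_dump(stderr);
}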
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < (1ul << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < (1ul << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
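
/*
 * Illustrative usage (not from the original file): after the user-mode
 * loader maps a guest region, it records the protection bits and can later
 * verify access rights before emulating a memory operation.  The address
 * and length are arbitrary example values, and the flag combination is an
 * assumption about typical usage.
 */
static int example_track_guest_region(target_ulong guest_addr, target_ulong size)
{
    page_set_flags(guest_addr, guest_addr + size,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    /* returns 0 when every page in the range is readable */
    return page_check_range(guest_addr, size, PAGE_READ);
}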
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;

    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        prot |= p->flags;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this. */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list. */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
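
/* Illustrative sketch, not part of the original file: the two lookups
   above are inverses of each other over a registered RAM block.  The
   helper name is hypothetical. */
#if 0
static void example_ram_round_trip(ram_addr_t ram_offset)
{
    /* Host pointer for device-local use only, per the comment on
       qemu_get_ram_ptr() above... */
    uint8_t *host = qemu_get_ram_ptr(ram_offset);

    /* ...and back from host pointer to ram offset, as the softmmu code
       does when it looks at a TLB entry. */
    assert(qemu_ram_addr_from_host(host) == ram_offset);
}
#endif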
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;

    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is nonzero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
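
/* Illustrative sketch, not part of the original file: registering a tiny
   MMIO region with the API above.  The device state, callbacks and
   Example* names are hypothetical; only the registration calls are real. */
#if 0
typedef struct {
    uint32_t reg;
} ExampleState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleState *s = opaque;
    return s->reg;              /* offset decoding omitted for brevity */
}

static void example_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t val)
{
    ExampleState *s = opaque;
    s->reg = val;
}

/* Leaving the byte/word slots NULL makes cpu_register_io_memory_fixed()
   tag the region with IO_MEM_SUBWIDTH, as the loop above shows. */
static CPUReadMemoryFunc * const example_read[3] = {
    NULL, NULL, example_readl,
};
static CPUWriteMemoryFunc * const example_write[3] = {
    NULL, NULL, example_writel,
};

static void example_mmio_init(ExampleState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(example_read, example_write, s);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif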
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
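
/* Illustrative sketch, not part of the original file: the intended
   map/unmap calling pattern for zero-copy DMA, including the retry path
   when the single bounce buffer is busy.  The helper names are
   hypothetical; only the three cpu_physical_memory_* / map-client calls
   are real. */
#if 0
static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len);

static void example_dma_retry(void *opaque)
{
    /* Called from cpu_notify_map_clients() once a mapping is released;
       a real device would re-issue its pending transfer here. */
}

static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!buf) {
        /* Resources exhausted: register a callback and retry later. */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* ... fill buf[0..plen), which may be shorter than requested ... */
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}
#endif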
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif