/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
22 #define WIN32_LEAN_AND_MEAN
25 #include <sys/types.h>
38 #include "qemu-common.h"
43 #if defined(CONFIG_USER_ONLY)
47 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_UNASSIGNED
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation. */
64 #define SMC_BITMAP_USE_THRESHOLD 10
66 #define MMAP_AREA_START 0x00000000
67 #define MMAP_AREA_END 0xa8000000
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
87 static TranslationBlock
*tbs
;
88 int code_gen_max_blocks
;
89 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
97 section close to code segment. */
98 #define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
102 #define code_gen_section \
103 __attribute__((aligned (32)))
106 uint8_t code_gen_prologue
[1024] code_gen_section
;
107 static uint8_t *code_gen_buffer
;
108 static unsigned long code_gen_buffer_size
;
109 /* threshold to flush the translated code buffer */
110 static unsigned long code_gen_buffer_max_size
;
111 uint8_t *code_gen_ptr
;
113 #if !defined(CONFIG_USER_ONLY)
114 ram_addr_t phys_ram_size
;
116 uint8_t *phys_ram_base
;
117 uint8_t *phys_ram_dirty
;
118 static int in_migration
;
119 static ram_addr_t phys_ram_alloc_offset
= 0;
123 /* current CPU in the current thread. It is only valid inside
125 CPUState
*cpu_single_env
;
126 /* 0 = Do not count executed instructions.
127 1 = Precise instruction counting.
128 2 = Adaptive rate instruction counting. */
130 /* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 typedef struct PhysPageDesc
{
147 /* offset in host memory of the page + io_index in the low bits */
148 ram_addr_t phys_offset
;
149 ram_addr_t region_offset
;
153 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154 /* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
158 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #define L1_SIZE (1 << L1_BITS)
164 #define L2_SIZE (1 << L2_BITS)
166 unsigned long qemu_real_host_page_size
;
167 unsigned long qemu_host_page_bits
;
168 unsigned long qemu_host_page_size
;
169 unsigned long qemu_host_page_mask
;
171 /* XXX: for system emulation, it could just be an array */
172 static PageDesc
*l1_map
[L1_SIZE
];
173 static PhysPageDesc
**l1_phys_map
;
175 #if !defined(CONFIG_USER_ONLY)
176 static void io_mem_init(void);
178 /* io memory support */
179 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
180 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
181 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
182 char io_mem_used
[IO_MEM_NB_ENTRIES
];
183 static int io_mem_watch
;
187 static const char *logfilename
= "/tmp/qemu.log";
190 static int log_append
= 0;
193 static int tlb_flush_count
;
194 static int tb_flush_count
;
195 static int tb_phys_invalidate_count
;
197 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198 typedef struct subpage_t
{
199 target_phys_addr_t base
;
200 CPUReadMemoryFunc
**mem_read
[TARGET_PAGE_SIZE
][4];
201 CPUWriteMemoryFunc
**mem_write
[TARGET_PAGE_SIZE
][4];
202 void *opaque
[TARGET_PAGE_SIZE
][2][4];
203 ram_addr_t region_offset
[TARGET_PAGE_SIZE
][2][4];
/* Make the host memory range [addr, addr+size) executable (and
   readable/writable).  The POSIX variant rounds the range out to host
   page boundaries before calling mprotect(). */
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    /* align start down to a host page boundary */
    start &= ~(page_size - 1);

    /* align end up to a host page boundary */
    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
232 static void page_init(void)
234 /* NOTE: we can always suppose that qemu_host_page_size >=
238 SYSTEM_INFO system_info
;
240 GetSystemInfo(&system_info
);
241 qemu_real_host_page_size
= system_info
.dwPageSize
;
244 qemu_real_host_page_size
= getpagesize();
246 if (qemu_host_page_size
== 0)
247 qemu_host_page_size
= qemu_real_host_page_size
;
248 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
249 qemu_host_page_size
= TARGET_PAGE_SIZE
;
250 qemu_host_page_bits
= 0;
251 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
252 qemu_host_page_bits
++;
253 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
254 l1_phys_map
= qemu_vmalloc(L1_SIZE
* sizeof(void *));
255 memset(l1_phys_map
, 0, L1_SIZE
* sizeof(void *));
257 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
259 long long startaddr
, endaddr
;
264 last_brk
= (unsigned long)sbrk(0);
265 f
= fopen("/proc/self/maps", "r");
268 n
= fscanf (f
, "%llx-%llx %*[^\n]\n", &startaddr
, &endaddr
);
270 startaddr
= MIN(startaddr
,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
272 endaddr
= MIN(endaddr
,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS
) - 1);
274 page_set_flags(startaddr
& TARGET_PAGE_MASK
,
275 TARGET_PAGE_ALIGN(endaddr
),
286 static inline PageDesc
**page_l1_map(target_ulong index
)
288 #if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
291 if (index
> ((target_ulong
)L2_SIZE
* L1_SIZE
))
294 return &l1_map
[index
>> L2_BITS
];
297 static inline PageDesc
*page_find_alloc(target_ulong index
)
300 lp
= page_l1_map(index
);
306 /* allocate if not found */
307 #if defined(CONFIG_USER_ONLY)
308 size_t len
= sizeof(PageDesc
) * L2_SIZE
;
309 /* Don't use qemu_malloc because it may recurse. */
310 p
= mmap(0, len
, PROT_READ
| PROT_WRITE
,
311 MAP_PRIVATE
| MAP_ANONYMOUS
, -1, 0);
314 unsigned long addr
= h2g(p
);
315 page_set_flags(addr
& TARGET_PAGE_MASK
,
316 TARGET_PAGE_ALIGN(addr
+ len
),
320 p
= qemu_mallocz(sizeof(PageDesc
) * L2_SIZE
);
324 return p
+ (index
& (L2_SIZE
- 1));
327 static inline PageDesc
*page_find(target_ulong index
)
330 lp
= page_l1_map(index
);
337 return p
+ (index
& (L2_SIZE
- 1));
340 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
345 p
= (void **)l1_phys_map
;
346 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
348 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351 lp
= p
+ ((index
>> (L1_BITS
+ L2_BITS
)) & (L1_SIZE
- 1));
354 /* allocate if not found */
357 p
= qemu_vmalloc(sizeof(void *) * L1_SIZE
);
358 memset(p
, 0, sizeof(void *) * L1_SIZE
);
362 lp
= p
+ ((index
>> L2_BITS
) & (L1_SIZE
- 1));
366 /* allocate if not found */
369 pd
= qemu_vmalloc(sizeof(PhysPageDesc
) * L2_SIZE
);
371 for (i
= 0; i
< L2_SIZE
; i
++) {
372 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
373 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
376 return ((PhysPageDesc
*)pd
) + (index
& (L2_SIZE
- 1));
379 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
381 return phys_page_find_alloc(index
, 0);
384 #if !defined(CONFIG_USER_ONLY)
385 static void tlb_protect_code(ram_addr_t ram_addr
);
386 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
388 #define mmap_lock() do { } while(0)
389 #define mmap_unlock() do { } while(0)
392 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
394 #if defined(CONFIG_USER_ONLY)
395 /* Currently it is not recommanded to allocate big chunks of data in
396 user mode. It will change when a dedicated libc will be used */
397 #define USE_STATIC_CODE_GEN_BUFFER
400 #ifdef USE_STATIC_CODE_GEN_BUFFER
401 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
404 static void code_gen_alloc(unsigned long tb_size
)
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer
= static_code_gen_buffer
;
408 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
409 map_exec(code_gen_buffer
, code_gen_buffer_size
);
411 code_gen_buffer_size
= tb_size
;
412 if (code_gen_buffer_size
== 0) {
413 #if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
417 /* XXX: needs ajustments */
418 code_gen_buffer_size
= (unsigned long)(phys_ram_size
/ 4);
421 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
422 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425 #if defined(__linux__)
430 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
431 #if defined(__x86_64__)
433 /* Cannot map more than that */
434 if (code_gen_buffer_size
> (800 * 1024 * 1024))
435 code_gen_buffer_size
= (800 * 1024 * 1024);
436 #elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
439 start
= (void *) 0x60000000UL
;
440 if (code_gen_buffer_size
> (512 * 1024 * 1024))
441 code_gen_buffer_size
= (512 * 1024 * 1024);
442 #elif defined(__arm__)
443 /* Map the buffer below 32M, so we can use direct calls and branches */
445 start
= (void *) 0x01000000UL
;
446 if (code_gen_buffer_size
> 16 * 1024 * 1024)
447 code_gen_buffer_size
= 16 * 1024 * 1024;
449 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
450 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
452 if (code_gen_buffer
== MAP_FAILED
) {
453 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
457 #elif defined(__FreeBSD__)
461 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
462 #if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
466 addr
= (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size
> (800 * 1024 * 1024))
469 code_gen_buffer_size
= (800 * 1024 * 1024);
471 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
472 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
474 if (code_gen_buffer
== MAP_FAILED
) {
475 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
480 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
481 map_exec(code_gen_buffer
, code_gen_buffer_size
);
483 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
484 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
485 code_gen_buffer_max_size
= code_gen_buffer_size
-
486 code_gen_max_block_size();
487 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
488 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
491 /* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
494 void cpu_exec_init_all(unsigned long tb_size
)
497 code_gen_alloc(tb_size
);
498 code_gen_ptr
= code_gen_buffer
;
500 #if !defined(CONFIG_USER_ONLY)
505 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
507 #define CPU_COMMON_SAVE_VERSION 1
509 static void cpu_common_save(QEMUFile
*f
, void *opaque
)
511 CPUState
*env
= opaque
;
513 qemu_put_be32s(f
, &env
->halted
);
514 qemu_put_be32s(f
, &env
->interrupt_request
);
517 static int cpu_common_load(QEMUFile
*f
, void *opaque
, int version_id
)
519 CPUState
*env
= opaque
;
521 if (version_id
!= CPU_COMMON_SAVE_VERSION
)
524 qemu_get_be32s(f
, &env
->halted
);
525 qemu_get_be32s(f
, &env
->interrupt_request
);
532 void cpu_exec_init(CPUState
*env
)
537 env
->next_cpu
= NULL
;
540 while (*penv
!= NULL
) {
541 penv
= (CPUState
**)&(*penv
)->next_cpu
;
544 env
->cpu_index
= cpu_index
;
545 TAILQ_INIT(&env
->breakpoints
);
546 TAILQ_INIT(&env
->watchpoints
);
548 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
549 register_savevm("cpu_common", cpu_index
, CPU_COMMON_SAVE_VERSION
,
550 cpu_common_save
, cpu_common_load
, env
);
551 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
552 cpu_save
, cpu_load
, env
);
556 static inline void invalidate_page_bitmap(PageDesc
*p
)
558 if (p
->code_bitmap
) {
559 qemu_free(p
->code_bitmap
);
560 p
->code_bitmap
= NULL
;
562 p
->code_write_count
= 0;
565 /* set to NULL all the 'first_tb' fields in all PageDescs */
566 static void page_flush_tb(void)
571 for(i
= 0; i
< L1_SIZE
; i
++) {
574 for(j
= 0; j
< L2_SIZE
; j
++) {
576 invalidate_page_bitmap(p
);
583 /* flush all the translation blocks */
584 /* XXX: tb_flush is currently not thread safe */
585 void tb_flush(CPUState
*env1
)
588 #if defined(DEBUG_FLUSH)
589 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
590 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
592 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
594 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
595 cpu_abort(env1
, "Internal error: code buffer overflow\n");
599 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
600 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
603 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
606 code_gen_ptr
= code_gen_buffer
;
607 /* XXX: flush processor icache at this point if cache flush is
#ifdef DEBUG_TB_CHECK

/* Verify that no TB in the physical hash still intersects the given
   (page-masked) address — used after invalidation. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

/* Verify that the circular jump list of 'tb' terminates back at 'tb'
   (tag 2 marks the list head). */
static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
670 /* invalidate one TB */
671 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
674 TranslationBlock
*tb1
;
678 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
681 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
685 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
687 TranslationBlock
*tb1
;
693 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
695 *ptb
= tb1
->page_next
[n1
];
698 ptb
= &tb1
->page_next
[n1
];
702 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
704 TranslationBlock
*tb1
, **ptb
;
707 ptb
= &tb
->jmp_next
[n
];
710 /* find tb(n) in circular list */
714 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
715 if (n1
== n
&& tb1
== tb
)
718 ptb
= &tb1
->jmp_first
;
720 ptb
= &tb1
->jmp_next
[n1
];
723 /* now we can suppress tb(n) from the list */
724 *ptb
= tb
->jmp_next
[n
];
726 tb
->jmp_next
[n
] = NULL
;
730 /* reset the jump entry 'n' of a TB so that it is not chained to
732 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
734 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
737 void tb_phys_invalidate(TranslationBlock
*tb
, target_ulong page_addr
)
742 target_phys_addr_t phys_pc
;
743 TranslationBlock
*tb1
, *tb2
;
745 /* remove the TB from the hash list */
746 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
747 h
= tb_phys_hash_func(phys_pc
);
748 tb_remove(&tb_phys_hash
[h
], tb
,
749 offsetof(TranslationBlock
, phys_hash_next
));
751 /* remove the TB from the page list */
752 if (tb
->page_addr
[0] != page_addr
) {
753 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
754 tb_page_remove(&p
->first_tb
, tb
);
755 invalidate_page_bitmap(p
);
757 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
758 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
759 tb_page_remove(&p
->first_tb
, tb
);
760 invalidate_page_bitmap(p
);
763 tb_invalidated_flag
= 1;
765 /* remove the TB from the hash list */
766 h
= tb_jmp_cache_hash_func(tb
->pc
);
767 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
768 if (env
->tb_jmp_cache
[h
] == tb
)
769 env
->tb_jmp_cache
[h
] = NULL
;
772 /* suppress this TB from the two jump lists */
773 tb_jmp_remove(tb
, 0);
774 tb_jmp_remove(tb
, 1);
776 /* suppress any remaining jumps to this TB */
782 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
783 tb2
= tb1
->jmp_next
[n1
];
784 tb_reset_jump(tb1
, n1
);
785 tb1
->jmp_next
[n1
] = NULL
;
788 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
790 tb_phys_invalidate_count
++;
/* Set bits [start, start+len) in the bit array 'tab' (LSB-first within
   each byte).  Handles the single-byte case, then a leading partial
   byte, whole bytes, and a trailing partial byte. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        /* range fits inside one byte */
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (end & 7) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
820 static void build_page_bitmap(PageDesc
*p
)
822 int n
, tb_start
, tb_end
;
823 TranslationBlock
*tb
;
825 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
830 tb
= (TranslationBlock
*)((long)tb
& ~3);
831 /* NOTE: this is subtle as a TB may span two physical pages */
833 /* NOTE: tb_end may be after the end of the page, but
834 it is not a problem */
835 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
836 tb_end
= tb_start
+ tb
->size
;
837 if (tb_end
> TARGET_PAGE_SIZE
)
838 tb_end
= TARGET_PAGE_SIZE
;
841 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
843 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
844 tb
= tb
->page_next
[n
];
848 TranslationBlock
*tb_gen_code(CPUState
*env
,
849 target_ulong pc
, target_ulong cs_base
,
850 int flags
, int cflags
)
852 TranslationBlock
*tb
;
854 target_ulong phys_pc
, phys_page2
, virt_page2
;
857 phys_pc
= get_phys_addr_code(env
, pc
);
860 /* flush must be done */
862 /* cannot fail at this point */
864 /* Don't forget to invalidate previous TB info. */
865 tb_invalidated_flag
= 1;
867 tc_ptr
= code_gen_ptr
;
869 tb
->cs_base
= cs_base
;
872 cpu_gen_code(env
, tb
, &code_gen_size
);
873 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
875 /* check next page if needed */
876 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
878 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
879 phys_page2
= get_phys_addr_code(env
, virt_page2
);
881 tb_link_phys(tb
, phys_pc
, phys_page2
);
885 /* invalidate all TBs which intersect with the target physical page
886 starting in range [start;end[. NOTE: start and end must refer to
887 the same physical page. 'is_cpu_write_access' should be true if called
888 from a real cpu write access: the virtual CPU will exit the current
889 TB if code is modified inside this TB. */
890 void tb_invalidate_phys_page_range(target_phys_addr_t start
, target_phys_addr_t end
,
891 int is_cpu_write_access
)
893 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
894 CPUState
*env
= cpu_single_env
;
895 target_ulong tb_start
, tb_end
;
898 #ifdef TARGET_HAS_PRECISE_SMC
899 int current_tb_not_found
= is_cpu_write_access
;
900 TranslationBlock
*current_tb
= NULL
;
901 int current_tb_modified
= 0;
902 target_ulong current_pc
= 0;
903 target_ulong current_cs_base
= 0;
904 int current_flags
= 0;
905 #endif /* TARGET_HAS_PRECISE_SMC */
907 p
= page_find(start
>> TARGET_PAGE_BITS
);
910 if (!p
->code_bitmap
&&
911 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
912 is_cpu_write_access
) {
913 /* build code bitmap */
914 build_page_bitmap(p
);
917 /* we remove all the TBs in the range [start, end[ */
918 /* XXX: see if in some cases it could be faster to invalidate all the code */
922 tb
= (TranslationBlock
*)((long)tb
& ~3);
923 tb_next
= tb
->page_next
[n
];
924 /* NOTE: this is subtle as a TB may span two physical pages */
926 /* NOTE: tb_end may be after the end of the page, but
927 it is not a problem */
928 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
929 tb_end
= tb_start
+ tb
->size
;
931 tb_start
= tb
->page_addr
[1];
932 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
934 if (!(tb_end
<= start
|| tb_start
>= end
)) {
935 #ifdef TARGET_HAS_PRECISE_SMC
936 if (current_tb_not_found
) {
937 current_tb_not_found
= 0;
939 if (env
->mem_io_pc
) {
940 /* now we have a real cpu fault */
941 current_tb
= tb_find_pc(env
->mem_io_pc
);
944 if (current_tb
== tb
&&
945 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
946 /* If we are modifying the current TB, we must stop
947 its execution. We could be more precise by checking
948 that the modification is after the current PC, but it
949 would require a specialized function to partially
950 restore the CPU state */
952 current_tb_modified
= 1;
953 cpu_restore_state(current_tb
, env
,
954 env
->mem_io_pc
, NULL
);
955 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
958 #endif /* TARGET_HAS_PRECISE_SMC */
959 /* we need to do that to handle the case where a signal
960 occurs while doing tb_phys_invalidate() */
963 saved_tb
= env
->current_tb
;
964 env
->current_tb
= NULL
;
966 tb_phys_invalidate(tb
, -1);
968 env
->current_tb
= saved_tb
;
969 if (env
->interrupt_request
&& env
->current_tb
)
970 cpu_interrupt(env
, env
->interrupt_request
);
975 #if !defined(CONFIG_USER_ONLY)
976 /* if no code remaining, no need to continue to use slow writes */
978 invalidate_page_bitmap(p
);
979 if (is_cpu_write_access
) {
980 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
984 #ifdef TARGET_HAS_PRECISE_SMC
985 if (current_tb_modified
) {
986 /* we generate a block containing just the instruction
987 modifying the memory. It will ensure that it cannot modify
989 env
->current_tb
= NULL
;
990 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
991 cpu_resume_from_signal(env
, NULL
);
996 /* len must be <= 8 and start must be a multiple of len */
997 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start
, int len
)
1003 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1004 cpu_single_env
->mem_io_vaddr
, len
,
1005 cpu_single_env
->eip
,
1006 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1009 p
= page_find(start
>> TARGET_PAGE_BITS
);
1012 if (p
->code_bitmap
) {
1013 offset
= start
& ~TARGET_PAGE_MASK
;
1014 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1015 if (b
& ((1 << len
) - 1))
1019 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1023 #if !defined(CONFIG_SOFTMMU)
1024 static void tb_invalidate_phys_page(target_phys_addr_t addr
,
1025 unsigned long pc
, void *puc
)
1027 TranslationBlock
*tb
;
1030 #ifdef TARGET_HAS_PRECISE_SMC
1031 TranslationBlock
*current_tb
= NULL
;
1032 CPUState
*env
= cpu_single_env
;
1033 int current_tb_modified
= 0;
1034 target_ulong current_pc
= 0;
1035 target_ulong current_cs_base
= 0;
1036 int current_flags
= 0;
1039 addr
&= TARGET_PAGE_MASK
;
1040 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1044 #ifdef TARGET_HAS_PRECISE_SMC
1045 if (tb
&& pc
!= 0) {
1046 current_tb
= tb_find_pc(pc
);
1049 while (tb
!= NULL
) {
1051 tb
= (TranslationBlock
*)((long)tb
& ~3);
1052 #ifdef TARGET_HAS_PRECISE_SMC
1053 if (current_tb
== tb
&&
1054 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1055 /* If we are modifying the current TB, we must stop
1056 its execution. We could be more precise by checking
1057 that the modification is after the current PC, but it
1058 would require a specialized function to partially
1059 restore the CPU state */
1061 current_tb_modified
= 1;
1062 cpu_restore_state(current_tb
, env
, pc
, puc
);
1063 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1066 #endif /* TARGET_HAS_PRECISE_SMC */
1067 tb_phys_invalidate(tb
, addr
);
1068 tb
= tb
->page_next
[n
];
1071 #ifdef TARGET_HAS_PRECISE_SMC
1072 if (current_tb_modified
) {
1073 /* we generate a block containing just the instruction
1074 modifying the memory. It will ensure that it cannot modify
1076 env
->current_tb
= NULL
;
1077 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1078 cpu_resume_from_signal(env
, puc
);
1084 /* add the tb in the target page and protect it if necessary */
1085 static inline void tb_alloc_page(TranslationBlock
*tb
,
1086 unsigned int n
, target_ulong page_addr
)
1089 TranslationBlock
*last_first_tb
;
1091 tb
->page_addr
[n
] = page_addr
;
1092 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
);
1093 tb
->page_next
[n
] = p
->first_tb
;
1094 last_first_tb
= p
->first_tb
;
1095 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1096 invalidate_page_bitmap(p
);
1098 #if defined(TARGET_HAS_SMC) || 1
1100 #if defined(CONFIG_USER_ONLY)
1101 if (p
->flags
& PAGE_WRITE
) {
1106 /* force the host page as non writable (writes will have a
1107 page fault + mprotect overhead) */
1108 page_addr
&= qemu_host_page_mask
;
1110 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1111 addr
+= TARGET_PAGE_SIZE
) {
1113 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1117 p2
->flags
&= ~PAGE_WRITE
;
1118 page_get_flags(addr
);
1120 mprotect(g2h(page_addr
), qemu_host_page_size
,
1121 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1122 #ifdef DEBUG_TB_INVALIDATE
1123 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1128 /* if some code is already present, then the pages are already
1129 protected. So we handle the case where only the first TB is
1130 allocated in a physical page */
1131 if (!last_first_tb
) {
1132 tlb_protect_code(page_addr
);
1136 #endif /* TARGET_HAS_SMC */
1139 /* Allocate a new translation block. Flush the translation buffer if
1140 too many translation blocks or too much generated code. */
1141 TranslationBlock
*tb_alloc(target_ulong pc
)
1143 TranslationBlock
*tb
;
1145 if (nb_tbs
>= code_gen_max_blocks
||
1146 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1148 tb
= &tbs
[nb_tbs
++];
1154 void tb_free(TranslationBlock
*tb
)
1156 /* In practice this is mostly used for single use temporary TB
1157 Ignore the hard cases and just back up if this TB happens to
1158 be the last one generated. */
1159 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1160 code_gen_ptr
= tb
->tc_ptr
;
1165 /* add a new TB and link it to the physical page tables. phys_page2 is
1166 (-1) to indicate that only one page contains the TB. */
1167 void tb_link_phys(TranslationBlock
*tb
,
1168 target_ulong phys_pc
, target_ulong phys_page2
)
1171 TranslationBlock
**ptb
;
1173 /* Grab the mmap lock to stop another thread invalidating this TB
1174 before we are done. */
1176 /* add in the physical hash table */
1177 h
= tb_phys_hash_func(phys_pc
);
1178 ptb
= &tb_phys_hash
[h
];
1179 tb
->phys_hash_next
= *ptb
;
1182 /* add in the page list */
1183 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1184 if (phys_page2
!= -1)
1185 tb_alloc_page(tb
, 1, phys_page2
);
1187 tb
->page_addr
[1] = -1;
1189 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1190 tb
->jmp_next
[0] = NULL
;
1191 tb
->jmp_next
[1] = NULL
;
1193 /* init original jump addresses */
1194 if (tb
->tb_next_offset
[0] != 0xffff)
1195 tb_reset_jump(tb
, 0);
1196 if (tb
->tb_next_offset
[1] != 0xffff)
1197 tb_reset_jump(tb
, 1);
1199 #ifdef DEBUG_TB_CHECK
1205 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1206 tb[1].tc_ptr. Return NULL if not found */
1207 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1209 int m_min
, m_max
, m
;
1211 TranslationBlock
*tb
;
1215 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1216 tc_ptr
>= (unsigned long)code_gen_ptr
)
1218 /* binary search (cf Knuth) */
1221 while (m_min
<= m_max
) {
1222 m
= (m_min
+ m_max
) >> 1;
1224 v
= (unsigned long)tb
->tc_ptr
;
1227 else if (tc_ptr
< v
) {
1236 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1238 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1240 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1243 tb1
= tb
->jmp_next
[n
];
1245 /* find head of list */
1248 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1251 tb1
= tb1
->jmp_next
[n1
];
1253 /* we are now sure now that tb jumps to tb1 */
1256 /* remove tb from the jmp_first list */
1257 ptb
= &tb_next
->jmp_first
;
1261 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1262 if (n1
== n
&& tb1
== tb
)
1264 ptb
= &tb1
->jmp_next
[n1
];
1266 *ptb
= tb
->jmp_next
[n
];
1267 tb
->jmp_next
[n
] = NULL
;
1269 /* suppress the jump to next tb in generated code */
1270 tb_reset_jump(tb
, n
);
1272 /* suppress jumps in the tb on which we could have jumped */
1273 tb_reset_jump_recursive(tb_next
);
1277 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1279 tb_reset_jump_recursive2(tb
, 0);
1280 tb_reset_jump_recursive2(tb
, 1);
1283 #if defined(TARGET_HAS_ICE)
1284 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1286 target_phys_addr_t addr
;
1288 ram_addr_t ram_addr
;
1291 addr
= cpu_get_phys_page_debug(env
, pc
);
1292 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1294 pd
= IO_MEM_UNASSIGNED
;
1296 pd
= p
->phys_offset
;
1298 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1299 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1303 /* Add a watchpoint. */
1304 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1305 int flags
, CPUWatchpoint
**watchpoint
)
1307 target_ulong len_mask
= ~(len
- 1);
1310 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1311 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1312 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1313 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1316 wp
= qemu_malloc(sizeof(*wp
));
1319 wp
->len_mask
= len_mask
;
1322 /* keep all GDB-injected watchpoints in front */
1324 TAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1326 TAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1328 tlb_flush_page(env
, addr
);
1335 /* Remove a specific watchpoint. */
1336 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1339 target_ulong len_mask
= ~(len
- 1);
1342 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1343 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1344 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1345 cpu_watchpoint_remove_by_ref(env
, wp
);
1352 /* Remove a specific watchpoint by reference. */
1353 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1355 TAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1357 tlb_flush_page(env
, watchpoint
->vaddr
);
1359 qemu_free(watchpoint
);
1362 /* Remove all matching watchpoints. */
1363 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1365 CPUWatchpoint
*wp
, *next
;
1367 TAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1368 if (wp
->flags
& mask
)
1369 cpu_watchpoint_remove_by_ref(env
, wp
);
1373 /* Add a breakpoint. */
1374 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1375 CPUBreakpoint
**breakpoint
)
1377 #if defined(TARGET_HAS_ICE)
1380 bp
= qemu_malloc(sizeof(*bp
));
1385 /* keep all GDB-injected breakpoints in front */
1387 TAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1389 TAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1391 breakpoint_invalidate(env
, pc
);
1401 /* Remove a specific breakpoint. */
1402 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1404 #if defined(TARGET_HAS_ICE)
1407 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1408 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1409 cpu_breakpoint_remove_by_ref(env
, bp
);
1419 /* Remove a specific breakpoint by reference. */
1420 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1422 #if defined(TARGET_HAS_ICE)
1423 TAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1425 breakpoint_invalidate(env
, breakpoint
->pc
);
1427 qemu_free(breakpoint
);
1431 /* Remove all matching breakpoints. */
1432 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1434 #if defined(TARGET_HAS_ICE)
1435 CPUBreakpoint
*bp
, *next
;
1437 TAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1438 if (bp
->flags
& mask
)
1439 cpu_breakpoint_remove_by_ref(env
, bp
);
1444 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1445 CPU loop after each instruction */
1446 void cpu_single_step(CPUState
*env
, int enabled
)
1448 #if defined(TARGET_HAS_ICE)
1449 if (env
->singlestep_enabled
!= enabled
) {
1450 env
->singlestep_enabled
= enabled
;
1451 /* must flush all the translated code to avoid inconsistancies */
1452 /* XXX: only flush what is necessary */
1458 /* enable or disable low levels log */
1459 void cpu_set_log(int log_flags
)
1461 loglevel
= log_flags
;
1462 if (loglevel
&& !logfile
) {
1463 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1465 perror(logfilename
);
1468 #if !defined(CONFIG_SOFTMMU)
1469 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1471 static char logfile_buf
[4096];
1472 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1475 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1479 if (!loglevel
&& logfile
) {
1485 void cpu_set_log_filename(const char *filename
)
1487 logfilename
= strdup(filename
);
1492 cpu_set_log(loglevel
);
1495 /* mask must never be zero, except for A20 change call */
1496 void cpu_interrupt(CPUState
*env
, int mask
)
1498 #if !defined(USE_NPTL)
1499 TranslationBlock
*tb
;
1500 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1504 if (mask
& CPU_INTERRUPT_EXIT
) {
1505 env
->exit_request
= 1;
1506 mask
&= ~CPU_INTERRUPT_EXIT
;
1509 old_mask
= env
->interrupt_request
;
1510 env
->interrupt_request
|= mask
;
1511 #if defined(USE_NPTL)
1512 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1513 problem and hope the cpu will stop of its own accord. For userspace
1514 emulation this often isn't actually as bad as it sounds. Often
1515 signals are used primarily to interrupt blocking syscalls. */
1518 env
->icount_decr
.u16
.high
= 0xffff;
1519 #ifndef CONFIG_USER_ONLY
1521 && (mask
& ~old_mask
) != 0) {
1522 cpu_abort(env
, "Raised interrupt while not in I/O function");
1526 tb
= env
->current_tb
;
1527 /* if the cpu is currently executing code, we must unlink it and
1528 all the potentially executing TB */
1529 if (tb
&& !testandset(&interrupt_lock
)) {
1530 env
->current_tb
= NULL
;
1531 tb_reset_jump_recursive(tb
);
1532 resetlock(&interrupt_lock
);
1538 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1540 env
->interrupt_request
&= ~mask
;
1543 const CPULogItem cpu_log_items
[] = {
1544 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1545 "show generated host assembly code for each compiled TB" },
1546 { CPU_LOG_TB_IN_ASM
, "in_asm",
1547 "show target assembly code for each compiled TB" },
1548 { CPU_LOG_TB_OP
, "op",
1549 "show micro ops for each compiled TB" },
1550 { CPU_LOG_TB_OP_OPT
, "op_opt",
1553 "before eflags optimization and "
1555 "after liveness analysis" },
1556 { CPU_LOG_INT
, "int",
1557 "show interrupts/exceptions in short format" },
1558 { CPU_LOG_EXEC
, "exec",
1559 "show trace before each executed TB (lots of logs)" },
1560 { CPU_LOG_TB_CPU
, "cpu",
1561 "show CPU state before block translation" },
1563 { CPU_LOG_PCALL
, "pcall",
1564 "show protected mode far calls/returns/exceptions" },
1565 { CPU_LOG_RESET
, "cpu_reset",
1566 "show CPU state before CPU resets" },
1569 { CPU_LOG_IOPORT
, "ioport",
1570 "show all i/o ports accesses" },
/* Compare the first n bytes of s1 against the NUL-terminated string s2.
   Matches only when s2 is exactly n characters long and all bytes agree.
   Returns non-zero on match, 0 otherwise. */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}
1582 /* takes a comma separated list of log masks. Return 0 if error. */
1583 int cpu_str_to_log_mask(const char *str
)
1585 const CPULogItem
*item
;
1592 p1
= strchr(p
, ',');
1595 if(cmp1(p
,p1
-p
,"all")) {
1596 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1600 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1601 if (cmp1(p
, p1
- p
, item
->name
))
1615 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1622 fprintf(stderr
, "qemu: fatal: ");
1623 vfprintf(stderr
, fmt
, ap
);
1624 fprintf(stderr
, "\n");
1626 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1628 cpu_dump_state(env
, stderr
, fprintf
, 0);
1630 if (qemu_log_enabled()) {
1631 qemu_log("qemu: fatal: ");
1632 qemu_log_vprintf(fmt
, ap2
);
1635 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1637 log_cpu_state(env
, 0);
1647 CPUState
*cpu_copy(CPUState
*env
)
1649 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1650 CPUState
*next_cpu
= new_env
->next_cpu
;
1651 int cpu_index
= new_env
->cpu_index
;
1652 #if defined(TARGET_HAS_ICE)
1657 memcpy(new_env
, env
, sizeof(CPUState
));
1659 /* Preserve chaining and index. */
1660 new_env
->next_cpu
= next_cpu
;
1661 new_env
->cpu_index
= cpu_index
;
1663 /* Clone all break/watchpoints.
1664 Note: Once we support ptrace with hw-debug register access, make sure
1665 BP_CPU break/watchpoints are handled correctly on clone. */
1666 TAILQ_INIT(&env
->breakpoints
);
1667 TAILQ_INIT(&env
->watchpoints
);
1668 #if defined(TARGET_HAS_ICE)
1669 TAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1670 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1672 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1673 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1681 #if !defined(CONFIG_USER_ONLY)
1683 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1687 /* Discard jump cache entries for any tb which might potentially
1688 overlap the flushed page. */
1689 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1690 memset (&env
->tb_jmp_cache
[i
], 0,
1691 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1693 i
= tb_jmp_cache_hash_page(addr
);
1694 memset (&env
->tb_jmp_cache
[i
], 0,
1695 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1698 /* NOTE: if flush_global is true, also flush global entries (not
1700 void tlb_flush(CPUState
*env
, int flush_global
)
1704 #if defined(DEBUG_TLB)
1705 printf("tlb_flush:\n");
1707 /* must reset current TB so that interrupts cannot modify the
1708 links while we are modifying them */
1709 env
->current_tb
= NULL
;
1711 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1712 env
->tlb_table
[0][i
].addr_read
= -1;
1713 env
->tlb_table
[0][i
].addr_write
= -1;
1714 env
->tlb_table
[0][i
].addr_code
= -1;
1715 env
->tlb_table
[1][i
].addr_read
= -1;
1716 env
->tlb_table
[1][i
].addr_write
= -1;
1717 env
->tlb_table
[1][i
].addr_code
= -1;
1718 #if (NB_MMU_MODES >= 3)
1719 env
->tlb_table
[2][i
].addr_read
= -1;
1720 env
->tlb_table
[2][i
].addr_write
= -1;
1721 env
->tlb_table
[2][i
].addr_code
= -1;
1722 #if (NB_MMU_MODES == 4)
1723 env
->tlb_table
[3][i
].addr_read
= -1;
1724 env
->tlb_table
[3][i
].addr_write
= -1;
1725 env
->tlb_table
[3][i
].addr_code
= -1;
1730 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1733 if (env
->kqemu_enabled
) {
1734 kqemu_flush(env
, flush_global
);
1740 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1742 if (addr
== (tlb_entry
->addr_read
&
1743 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1744 addr
== (tlb_entry
->addr_write
&
1745 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1746 addr
== (tlb_entry
->addr_code
&
1747 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1748 tlb_entry
->addr_read
= -1;
1749 tlb_entry
->addr_write
= -1;
1750 tlb_entry
->addr_code
= -1;
1754 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1758 #if defined(DEBUG_TLB)
1759 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1761 /* must reset current TB so that interrupts cannot modify the
1762 links while we are modifying them */
1763 env
->current_tb
= NULL
;
1765 addr
&= TARGET_PAGE_MASK
;
1766 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1767 tlb_flush_entry(&env
->tlb_table
[0][i
], addr
);
1768 tlb_flush_entry(&env
->tlb_table
[1][i
], addr
);
1769 #if (NB_MMU_MODES >= 3)
1770 tlb_flush_entry(&env
->tlb_table
[2][i
], addr
);
1771 #if (NB_MMU_MODES == 4)
1772 tlb_flush_entry(&env
->tlb_table
[3][i
], addr
);
1776 tlb_flush_jmp_cache(env
, addr
);
1779 if (env
->kqemu_enabled
) {
1780 kqemu_flush_page(env
, addr
);
1785 /* update the TLBs so that writes to code in the virtual page 'addr'
1787 static void tlb_protect_code(ram_addr_t ram_addr
)
1789 cpu_physical_memory_reset_dirty(ram_addr
,
1790 ram_addr
+ TARGET_PAGE_SIZE
,
1794 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1795 tested for self modifying code */
1796 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1799 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1802 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1803 unsigned long start
, unsigned long length
)
1806 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1807 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1808 if ((addr
- start
) < length
) {
1809 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1814 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1818 unsigned long length
, start1
;
1822 start
&= TARGET_PAGE_MASK
;
1823 end
= TARGET_PAGE_ALIGN(end
);
1825 length
= end
- start
;
1828 len
= length
>> TARGET_PAGE_BITS
;
1830 /* XXX: should not depend on cpu context */
1832 if (env
->kqemu_enabled
) {
1835 for(i
= 0; i
< len
; i
++) {
1836 kqemu_set_notdirty(env
, addr
);
1837 addr
+= TARGET_PAGE_SIZE
;
1841 mask
= ~dirty_flags
;
1842 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
1843 for(i
= 0; i
< len
; i
++)
1846 /* we modify the TLB cache so that the dirty bit will be set again
1847 when accessing the range */
1848 start1
= start
+ (unsigned long)phys_ram_base
;
1849 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
1850 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1851 tlb_reset_dirty_range(&env
->tlb_table
[0][i
], start1
, length
);
1852 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1853 tlb_reset_dirty_range(&env
->tlb_table
[1][i
], start1
, length
);
1854 #if (NB_MMU_MODES >= 3)
1855 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1856 tlb_reset_dirty_range(&env
->tlb_table
[2][i
], start1
, length
);
1857 #if (NB_MMU_MODES == 4)
1858 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1859 tlb_reset_dirty_range(&env
->tlb_table
[3][i
], start1
, length
);
1865 int cpu_physical_memory_set_dirty_tracking(int enable
)
1867 in_migration
= enable
;
1871 int cpu_physical_memory_get_dirty_tracking(void)
1873 return in_migration
;
1876 void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
, target_phys_addr_t end_addr
)
1879 kvm_physical_sync_dirty_bitmap(start_addr
, end_addr
);
1882 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
1884 ram_addr_t ram_addr
;
1886 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1887 ram_addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) +
1888 tlb_entry
->addend
- (unsigned long)phys_ram_base
;
1889 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
1890 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
1895 /* update the TLB according to the current state of the dirty bits */
1896 void cpu_tlb_update_dirty(CPUState
*env
)
1899 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1900 tlb_update_dirty(&env
->tlb_table
[0][i
]);
1901 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1902 tlb_update_dirty(&env
->tlb_table
[1][i
]);
1903 #if (NB_MMU_MODES >= 3)
1904 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1905 tlb_update_dirty(&env
->tlb_table
[2][i
]);
1906 #if (NB_MMU_MODES == 4)
1907 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
1908 tlb_update_dirty(&env
->tlb_table
[3][i
]);
1913 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
1915 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
1916 tlb_entry
->addr_write
= vaddr
;
1919 /* update the TLB corresponding to virtual page vaddr
1920 so that it is no longer dirty */
1921 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
1925 vaddr
&= TARGET_PAGE_MASK
;
1926 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1927 tlb_set_dirty1(&env
->tlb_table
[0][i
], vaddr
);
1928 tlb_set_dirty1(&env
->tlb_table
[1][i
], vaddr
);
1929 #if (NB_MMU_MODES >= 3)
1930 tlb_set_dirty1(&env
->tlb_table
[2][i
], vaddr
);
1931 #if (NB_MMU_MODES == 4)
1932 tlb_set_dirty1(&env
->tlb_table
[3][i
], vaddr
);
1937 /* add a new TLB entry. At most one entry for a given virtual address
1938 is permitted. Return 0 if OK or 2 if the page could not be mapped
1939 (can only happen in non SOFTMMU mode for I/O pages or pages
1940 conflicting with the host address space). */
1941 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
1942 target_phys_addr_t paddr
, int prot
,
1943 int mmu_idx
, int is_softmmu
)
1948 target_ulong address
;
1949 target_ulong code_address
;
1950 target_phys_addr_t addend
;
1954 target_phys_addr_t iotlb
;
1956 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
1958 pd
= IO_MEM_UNASSIGNED
;
1960 pd
= p
->phys_offset
;
1962 #if defined(DEBUG_TLB)
1963 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1964 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
1969 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
1970 /* IO memory case (romd handled later) */
1971 address
|= TLB_MMIO
;
1973 addend
= (unsigned long)phys_ram_base
+ (pd
& TARGET_PAGE_MASK
);
1974 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
1976 iotlb
= pd
& TARGET_PAGE_MASK
;
1977 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
1978 iotlb
|= IO_MEM_NOTDIRTY
;
1980 iotlb
|= IO_MEM_ROM
;
1982 /* IO handlers are currently passed a physical address.
1983 It would be nice to pass an offset from the base address
1984 of that region. This would avoid having to special case RAM,
1985 and avoid full address decoding in every device.
1986 We can't use the high bits of pd for this because
1987 IO_MEM_ROMD uses these as a ram address. */
1988 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
1990 iotlb
+= p
->region_offset
;
1996 code_address
= address
;
1997 /* Make accesses to pages with watchpoints go via the
1998 watchpoint trap routines. */
1999 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2000 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2001 iotlb
= io_mem_watch
+ paddr
;
2002 /* TODO: The memory case can be optimized by not trapping
2003 reads of pages with a write breakpoint. */
2004 address
|= TLB_MMIO
;
2008 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2009 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2010 te
= &env
->tlb_table
[mmu_idx
][index
];
2011 te
->addend
= addend
- vaddr
;
2012 if (prot
& PAGE_READ
) {
2013 te
->addr_read
= address
;
2018 if (prot
& PAGE_EXEC
) {
2019 te
->addr_code
= code_address
;
2023 if (prot
& PAGE_WRITE
) {
2024 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2025 (pd
& IO_MEM_ROMD
)) {
2026 /* Write access calls the I/O callback. */
2027 te
->addr_write
= address
| TLB_MMIO
;
2028 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2029 !cpu_physical_memory_is_dirty(pd
)) {
2030 te
->addr_write
= address
| TLB_NOTDIRTY
;
2032 te
->addr_write
= address
;
2035 te
->addr_write
= -1;
2042 void tlb_flush(CPUState
*env
, int flush_global
)
2046 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2050 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2051 target_phys_addr_t paddr
, int prot
,
2052 int mmu_idx
, int is_softmmu
)
2057 /* dump memory mappings */
2058 void page_dump(FILE *f
)
2060 unsigned long start
, end
;
2061 int i
, j
, prot
, prot1
;
2064 fprintf(f
, "%-8s %-8s %-8s %s\n",
2065 "start", "end", "size", "prot");
2069 for(i
= 0; i
<= L1_SIZE
; i
++) {
2074 for(j
= 0;j
< L2_SIZE
; j
++) {
2079 if (prot1
!= prot
) {
2080 end
= (i
<< (32 - L1_BITS
)) | (j
<< TARGET_PAGE_BITS
);
2082 fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2083 start
, end
, end
- start
,
2084 prot
& PAGE_READ
? 'r' : '-',
2085 prot
& PAGE_WRITE
? 'w' : '-',
2086 prot
& PAGE_EXEC
? 'x' : '-');
2100 int page_get_flags(target_ulong address
)
2104 p
= page_find(address
>> TARGET_PAGE_BITS
);
2110 /* modify the flags of a page and invalidate the code if
2111 necessary. The flag PAGE_WRITE_ORG is positionned automatically
2112 depending on PAGE_WRITE */
2113 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2118 /* mmap_lock should already be held. */
2119 start
= start
& TARGET_PAGE_MASK
;
2120 end
= TARGET_PAGE_ALIGN(end
);
2121 if (flags
& PAGE_WRITE
)
2122 flags
|= PAGE_WRITE_ORG
;
2123 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2124 p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
);
2125 /* We may be called for host regions that are outside guest
2129 /* if the write protection is set, then we invalidate the code
2131 if (!(p
->flags
& PAGE_WRITE
) &&
2132 (flags
& PAGE_WRITE
) &&
2134 tb_invalidate_phys_page(addr
, 0, NULL
);
2140 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2146 if (start
+ len
< start
)
2147 /* we've wrapped around */
2150 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2151 start
= start
& TARGET_PAGE_MASK
;
2153 for(addr
= start
; addr
< end
; addr
+= TARGET_PAGE_SIZE
) {
2154 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2157 if( !(p
->flags
& PAGE_VALID
) )
2160 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2162 if (flags
& PAGE_WRITE
) {
2163 if (!(p
->flags
& PAGE_WRITE_ORG
))
2165 /* unprotect the page if it was put read-only because it
2166 contains translated code */
2167 if (!(p
->flags
& PAGE_WRITE
)) {
2168 if (!page_unprotect(addr
, 0, NULL
))
2177 /* called from signal handler: invalidate the code and unprotect the
2178 page. Return TRUE if the fault was successfully handled. */
2179 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2181 unsigned int page_index
, prot
, pindex
;
2183 target_ulong host_start
, host_end
, addr
;
2185 /* Technically this isn't safe inside a signal handler. However we
2186 know this only ever happens in a synchronous SEGV handler, so in
2187 practice it seems to be ok. */
2190 host_start
= address
& qemu_host_page_mask
;
2191 page_index
= host_start
>> TARGET_PAGE_BITS
;
2192 p1
= page_find(page_index
);
2197 host_end
= host_start
+ qemu_host_page_size
;
2200 for(addr
= host_start
;addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2204 /* if the page was really writable, then we change its
2205 protection back to writable */
2206 if (prot
& PAGE_WRITE_ORG
) {
2207 pindex
= (address
- host_start
) >> TARGET_PAGE_BITS
;
2208 if (!(p1
[pindex
].flags
& PAGE_WRITE
)) {
2209 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2210 (prot
& PAGE_BITS
) | PAGE_WRITE
);
2211 p1
[pindex
].flags
|= PAGE_WRITE
;
2212 /* and since the content will be modified, we must invalidate
2213 the corresponding translated code. */
2214 tb_invalidate_phys_page(address
, pc
, puc
);
2215 #ifdef DEBUG_TB_CHECK
2216 tb_invalidate_check(address
);
2226 static inline void tlb_set_dirty(CPUState
*env
,
2227 unsigned long addr
, target_ulong vaddr
)
2230 #endif /* defined(CONFIG_USER_ONLY) */
2232 #if !defined(CONFIG_USER_ONLY)
2234 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2235 ram_addr_t memory
, ram_addr_t region_offset
);
2236 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2237 ram_addr_t orig_memory
, ram_addr_t region_offset
);
2238 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2241 if (addr > start_addr) \
2244 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2245 if (start_addr2 > 0) \
2249 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2250 end_addr2 = TARGET_PAGE_SIZE - 1; \
2252 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2253 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2258 /* register physical memory. 'size' must be a multiple of the target
2259 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2260 io memory page. The address used when calling the IO function is
2261 the offset from the start of the region, plus region_offset. Both
2262 start_region and regon_offset are rounded down to a page boundary
2263 before calculating this offset. This should not be a problem unless
2264 the low bits of start_addr and region_offset differ. */
2265 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr
,
2267 ram_addr_t phys_offset
,
2268 ram_addr_t region_offset
)
2270 target_phys_addr_t addr
, end_addr
;
2273 ram_addr_t orig_size
= size
;
2277 /* XXX: should not depend on cpu context */
2279 if (env
->kqemu_enabled
) {
2280 kqemu_set_phys_mem(start_addr
, size
, phys_offset
);
2284 kvm_set_phys_mem(start_addr
, size
, phys_offset
);
2286 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2287 region_offset
= start_addr
;
2289 region_offset
&= TARGET_PAGE_MASK
;
2290 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2291 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2292 for(addr
= start_addr
; addr
!= end_addr
; addr
+= TARGET_PAGE_SIZE
) {
2293 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2294 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2295 ram_addr_t orig_memory
= p
->phys_offset
;
2296 target_phys_addr_t start_addr2
, end_addr2
;
2297 int need_subpage
= 0;
2299 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2301 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2302 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2303 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2304 &p
->phys_offset
, orig_memory
,
2307 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2310 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2312 p
->region_offset
= 0;
2314 p
->phys_offset
= phys_offset
;
2315 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2316 (phys_offset
& IO_MEM_ROMD
))
2317 phys_offset
+= TARGET_PAGE_SIZE
;
2320 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2321 p
->phys_offset
= phys_offset
;
2322 p
->region_offset
= region_offset
;
2323 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2324 (phys_offset
& IO_MEM_ROMD
)) {
2325 phys_offset
+= TARGET_PAGE_SIZE
;
2327 target_phys_addr_t start_addr2
, end_addr2
;
2328 int need_subpage
= 0;
2330 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2331 end_addr2
, need_subpage
);
2333 if (need_subpage
|| phys_offset
& IO_MEM_SUBWIDTH
) {
2334 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2335 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2336 addr
& TARGET_PAGE_MASK
);
2337 subpage_register(subpage
, start_addr2
, end_addr2
,
2338 phys_offset
, region_offset
);
2339 p
->region_offset
= 0;
2343 region_offset
+= TARGET_PAGE_SIZE
;
2346 /* since each CPU stores ram addresses in its TLB cache, we must
2347 reset the modified entries */
2349 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2354 /* XXX: temporary until new memory mapping API */
2355 ram_addr_t
cpu_get_physical_page_desc(target_phys_addr_t addr
)
2359 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2361 return IO_MEM_UNASSIGNED
;
2362 return p
->phys_offset
;
2365 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2368 kvm_coalesce_mmio_region(addr
, size
);
2371 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2374 kvm_uncoalesce_mmio_region(addr
, size
);
2377 /* XXX: better than nothing */
2378 ram_addr_t
qemu_ram_alloc(ram_addr_t size
)
2381 if ((phys_ram_alloc_offset
+ size
) > phys_ram_size
) {
2382 fprintf(stderr
, "Not enough memory (requested_size = %" PRIu64
", max memory = %" PRIu64
")\n",
2383 (uint64_t)size
, (uint64_t)phys_ram_size
);
2386 addr
= phys_ram_alloc_offset
;
2387 phys_ram_alloc_offset
= TARGET_PAGE_ALIGN(phys_ram_alloc_offset
+ size
);
2391 void qemu_ram_free(ram_addr_t addr
)
2395 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
2397 #ifdef DEBUG_UNASSIGNED
2398 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2400 #if defined(TARGET_SPARC)
2401 do_unassigned_access(addr
, 0, 0, 0, 1);
2406 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
2408 #ifdef DEBUG_UNASSIGNED
2409 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2411 #if defined(TARGET_SPARC)
2412 do_unassigned_access(addr
, 0, 0, 0, 2);
2417 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
2419 #ifdef DEBUG_UNASSIGNED
2420 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2422 #if defined(TARGET_SPARC)
2423 do_unassigned_access(addr
, 0, 0, 0, 4);
2428 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2430 #ifdef DEBUG_UNASSIGNED
2431 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2433 #if defined(TARGET_SPARC)
2434 do_unassigned_access(addr
, 1, 0, 0, 1);
2438 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2440 #ifdef DEBUG_UNASSIGNED
2441 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2443 #if defined(TARGET_SPARC)
2444 do_unassigned_access(addr
, 1, 0, 0, 2);
2448 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
2450 #ifdef DEBUG_UNASSIGNED
2451 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
2453 #if defined(TARGET_SPARC)
2454 do_unassigned_access(addr
, 1, 0, 0, 4);
2458 static CPUReadMemoryFunc
*unassigned_mem_read
[3] = {
2459 unassigned_mem_readb
,
2460 unassigned_mem_readw
,
2461 unassigned_mem_readl
,
2464 static CPUWriteMemoryFunc
*unassigned_mem_write
[3] = {
2465 unassigned_mem_writeb
,
2466 unassigned_mem_writew
,
2467 unassigned_mem_writel
,
2470 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
2474 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2475 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2476 #if !defined(CONFIG_USER_ONLY)
2477 tb_invalidate_phys_page_fast(ram_addr
, 1);
2478 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2481 stb_p(phys_ram_base
+ ram_addr
, val
);
2483 if (cpu_single_env
->kqemu_enabled
&&
2484 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2485 kqemu_modify_page(cpu_single_env
, ram_addr
);
2487 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2488 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2489 /* we remove the notdirty callback only if the code has been
2491 if (dirty_flags
== 0xff)
2492 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2495 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
2499 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2500 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2501 #if !defined(CONFIG_USER_ONLY)
2502 tb_invalidate_phys_page_fast(ram_addr
, 2);
2503 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2506 stw_p(phys_ram_base
+ ram_addr
, val
);
2508 if (cpu_single_env
->kqemu_enabled
&&
2509 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2510 kqemu_modify_page(cpu_single_env
, ram_addr
);
2512 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2513 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2514 /* we remove the notdirty callback only if the code has been
2516 if (dirty_flags
== 0xff)
2517 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2520 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
2524 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2525 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2526 #if !defined(CONFIG_USER_ONLY)
2527 tb_invalidate_phys_page_fast(ram_addr
, 4);
2528 dirty_flags
= phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
];
2531 stl_p(phys_ram_base
+ ram_addr
, val
);
2533 if (cpu_single_env
->kqemu_enabled
&&
2534 (dirty_flags
& KQEMU_MODIFY_PAGE_MASK
) != KQEMU_MODIFY_PAGE_MASK
)
2535 kqemu_modify_page(cpu_single_env
, ram_addr
);
2537 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2538 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] = dirty_flags
;
2539 /* we remove the notdirty callback only if the code has been
2541 if (dirty_flags
== 0xff)
2542 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2545 static CPUReadMemoryFunc
*error_mem_read
[3] = {
2546 NULL
, /* never used */
2547 NULL
, /* never used */
2548 NULL
, /* never used */
2551 static CPUWriteMemoryFunc
*notdirty_mem_write
[3] = {
2552 notdirty_mem_writeb
,
2553 notdirty_mem_writew
,
2554 notdirty_mem_writel
,
2557 /* Generate a debug exception if a watchpoint has been hit. */
2558 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2560 CPUState
*env
= cpu_single_env
;
2561 target_ulong pc
, cs_base
;
2562 TranslationBlock
*tb
;
2567 if (env
->watchpoint_hit
) {
2568 /* We re-entered the check after replacing the TB. Now raise
2569 * the debug interrupt so that is will trigger after the
2570 * current instruction. */
2571 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2574 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2575 TAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2576 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2577 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2578 wp
->flags
|= BP_WATCHPOINT_HIT
;
2579 if (!env
->watchpoint_hit
) {
2580 env
->watchpoint_hit
= wp
;
2581 tb
= tb_find_pc(env
->mem_io_pc
);
2583 cpu_abort(env
, "check_watchpoint: could not find TB for "
2584 "pc=%p", (void *)env
->mem_io_pc
);
2586 cpu_restore_state(tb
, env
, env
->mem_io_pc
, NULL
);
2587 tb_phys_invalidate(tb
, -1);
2588 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2589 env
->exception_index
= EXCP_DEBUG
;
2591 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2592 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2594 cpu_resume_from_signal(env
, NULL
);
2597 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2602 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2603 so these check for a hit then pass through to the normal out-of-line
2605 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
2607 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
2608 return ldub_phys(addr
);
2611 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
2613 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
2614 return lduw_phys(addr
);
2617 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
2619 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
2620 return ldl_phys(addr
);
2623 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
2626 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
2627 stb_phys(addr
, val
);
2630 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
2633 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
2634 stw_phys(addr
, val
);
2637 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
2640 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
2641 stl_phys(addr
, val
);
2644 static CPUReadMemoryFunc
*watch_mem_read
[3] = {
2650 static CPUWriteMemoryFunc
*watch_mem_write
[3] = {
2656 static inline uint32_t subpage_readlen (subpage_t
*mmio
, target_phys_addr_t addr
,
2662 idx
= SUBPAGE_IDX(addr
);
2663 #if defined(DEBUG_SUBPAGE)
2664 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
2665 mmio
, len
, addr
, idx
);
2667 ret
= (**mmio
->mem_read
[idx
][len
])(mmio
->opaque
[idx
][0][len
],
2668 addr
+ mmio
->region_offset
[idx
][0][len
]);
2673 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
2674 uint32_t value
, unsigned int len
)
2678 idx
= SUBPAGE_IDX(addr
);
2679 #if defined(DEBUG_SUBPAGE)
2680 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n", __func__
,
2681 mmio
, len
, addr
, idx
, value
);
2683 (**mmio
->mem_write
[idx
][len
])(mmio
->opaque
[idx
][1][len
],
2684 addr
+ mmio
->region_offset
[idx
][1][len
],
2688 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
2690 #if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2694 return subpage_readlen(opaque
, addr
, 0);
2697 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
2700 #if defined(DEBUG_SUBPAGE)
2701 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2703 subpage_writelen(opaque
, addr
, value
, 0);
2706 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
2708 #if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2712 return subpage_readlen(opaque
, addr
, 1);
2715 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
2718 #if defined(DEBUG_SUBPAGE)
2719 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2721 subpage_writelen(opaque
, addr
, value
, 1);
2724 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
2726 #if defined(DEBUG_SUBPAGE)
2727 printf("%s: addr " TARGET_FMT_plx
"\n", __func__
, addr
);
2730 return subpage_readlen(opaque
, addr
, 2);
2733 static void subpage_writel (void *opaque
,
2734 target_phys_addr_t addr
, uint32_t value
)
2736 #if defined(DEBUG_SUBPAGE)
2737 printf("%s: addr " TARGET_FMT_plx
" val %08x\n", __func__
, addr
, value
);
2739 subpage_writelen(opaque
, addr
, value
, 2);
2742 static CPUReadMemoryFunc
*subpage_read
[] = {
2748 static CPUWriteMemoryFunc
*subpage_write
[] = {
2754 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2755 ram_addr_t memory
, ram_addr_t region_offset
)
2760 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
2762 idx
= SUBPAGE_IDX(start
);
2763 eidx
= SUBPAGE_IDX(end
);
2764 #if defined(DEBUG_SUBPAGE)
2765 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__
,
2766 mmio
, start
, end
, idx
, eidx
, memory
);
2768 memory
>>= IO_MEM_SHIFT
;
2769 for (; idx
<= eidx
; idx
++) {
2770 for (i
= 0; i
< 4; i
++) {
2771 if (io_mem_read
[memory
][i
]) {
2772 mmio
->mem_read
[idx
][i
] = &io_mem_read
[memory
][i
];
2773 mmio
->opaque
[idx
][0][i
] = io_mem_opaque
[memory
];
2774 mmio
->region_offset
[idx
][0][i
] = region_offset
;
2776 if (io_mem_write
[memory
][i
]) {
2777 mmio
->mem_write
[idx
][i
] = &io_mem_write
[memory
][i
];
2778 mmio
->opaque
[idx
][1][i
] = io_mem_opaque
[memory
];
2779 mmio
->region_offset
[idx
][1][i
] = region_offset
;
2787 static void *subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2788 ram_addr_t orig_memory
, ram_addr_t region_offset
)
2793 mmio
= qemu_mallocz(sizeof(subpage_t
));
2796 subpage_memory
= cpu_register_io_memory(0, subpage_read
, subpage_write
, mmio
);
2797 #if defined(DEBUG_SUBPAGE)
2798 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
2799 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
2801 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
2802 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
- 1, orig_memory
,
2808 static int get_free_io_mem_idx(void)
2812 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
2813 if (!io_mem_used
[i
]) {
2821 static void io_mem_init(void)
2825 cpu_register_io_memory(IO_MEM_ROM
>> IO_MEM_SHIFT
, error_mem_read
, unassigned_mem_write
, NULL
);
2826 cpu_register_io_memory(IO_MEM_UNASSIGNED
>> IO_MEM_SHIFT
, unassigned_mem_read
, unassigned_mem_write
, NULL
);
2827 cpu_register_io_memory(IO_MEM_NOTDIRTY
>> IO_MEM_SHIFT
, error_mem_read
, notdirty_mem_write
, NULL
);
2831 io_mem_watch
= cpu_register_io_memory(0, watch_mem_read
,
2832 watch_mem_write
, NULL
);
2833 /* alloc dirty bits array */
2834 phys_ram_dirty
= qemu_vmalloc(phys_ram_size
>> TARGET_PAGE_BITS
);
2835 memset(phys_ram_dirty
, 0xff, phys_ram_size
>> TARGET_PAGE_BITS
);
2838 /* mem_read and mem_write are arrays of functions containing the
2839 function to access byte (index 0), word (index 1) and dword (index
2840 2). Functions can be omitted with a NULL function pointer. The
2841 registered functions may be modified dynamically later.
2842 If io_index is non zero, the corresponding io zone is
2843 modified. If it is zero, a new io zone is allocated. The return
2844 value can be used with cpu_register_physical_memory(). (-1) is
2845 returned if error. */
2846 int cpu_register_io_memory(int io_index
,
2847 CPUReadMemoryFunc
**mem_read
,
2848 CPUWriteMemoryFunc
**mem_write
,
2851 int i
, subwidth
= 0;
2853 if (io_index
<= 0) {
2854 io_index
= get_free_io_mem_idx();
2858 if (io_index
>= IO_MEM_NB_ENTRIES
)
2862 for(i
= 0;i
< 3; i
++) {
2863 if (!mem_read
[i
] || !mem_write
[i
])
2864 subwidth
= IO_MEM_SUBWIDTH
;
2865 io_mem_read
[io_index
][i
] = mem_read
[i
];
2866 io_mem_write
[io_index
][i
] = mem_write
[i
];
2868 io_mem_opaque
[io_index
] = opaque
;
2869 return (io_index
<< IO_MEM_SHIFT
) | subwidth
;
2872 void cpu_unregister_io_memory(int io_table_address
)
2875 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
2877 for (i
=0;i
< 3; i
++) {
2878 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
2879 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
2881 io_mem_opaque
[io_index
] = NULL
;
2882 io_mem_used
[io_index
] = 0;
2885 CPUWriteMemoryFunc
**cpu_get_io_memory_write(int io_index
)
2887 return io_mem_write
[io_index
>> IO_MEM_SHIFT
];
2890 CPUReadMemoryFunc
**cpu_get_io_memory_read(int io_index
)
2892 return io_mem_read
[io_index
>> IO_MEM_SHIFT
];
2895 #endif /* !defined(CONFIG_USER_ONLY) */
2897 /* physical memory access (slow version, mainly for debug) */
2898 #if defined(CONFIG_USER_ONLY)
2899 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2900 int len
, int is_write
)
2907 page
= addr
& TARGET_PAGE_MASK
;
2908 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2911 flags
= page_get_flags(page
);
2912 if (!(flags
& PAGE_VALID
))
2915 if (!(flags
& PAGE_WRITE
))
2917 /* XXX: this code should not depend on lock_user */
2918 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
2919 /* FIXME - should this return an error rather than just fail? */
2922 unlock_user(p
, addr
, l
);
2924 if (!(flags
& PAGE_READ
))
2926 /* XXX: this code should not depend on lock_user */
2927 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
2928 /* FIXME - should this return an error rather than just fail? */
2931 unlock_user(p
, addr
, 0);
2940 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
2941 int len
, int is_write
)
2946 target_phys_addr_t page
;
2951 page
= addr
& TARGET_PAGE_MASK
;
2952 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
2955 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
2957 pd
= IO_MEM_UNASSIGNED
;
2959 pd
= p
->phys_offset
;
2963 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
2964 target_phys_addr_t addr1
= addr
;
2965 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
2967 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
2968 /* XXX: could force cpu_single_env to NULL to avoid
2970 if (l
>= 4 && ((addr1
& 3) == 0)) {
2971 /* 32 bit write access */
2973 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr1
, val
);
2975 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
2976 /* 16 bit write access */
2978 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr1
, val
);
2981 /* 8 bit write access */
2983 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr1
, val
);
2987 unsigned long addr1
;
2988 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
2990 ptr
= phys_ram_base
+ addr1
;
2991 memcpy(ptr
, buf
, l
);
2992 if (!cpu_physical_memory_is_dirty(addr1
)) {
2993 /* invalidate code */
2994 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
2996 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
2997 (0xff & ~CODE_DIRTY_FLAG
);
3001 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3002 !(pd
& IO_MEM_ROMD
)) {
3003 target_phys_addr_t addr1
= addr
;
3005 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3007 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3008 if (l
>= 4 && ((addr1
& 3) == 0)) {
3009 /* 32 bit read access */
3010 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr1
);
3013 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3014 /* 16 bit read access */
3015 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr1
);
3019 /* 8 bit read access */
3020 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr1
);
3026 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3027 (addr
& ~TARGET_PAGE_MASK
);
3028 memcpy(buf
, ptr
, l
);
3037 /* used for ROM loading : can write in RAM and ROM */
3038 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3039 const uint8_t *buf
, int len
)
3043 target_phys_addr_t page
;
3048 page
= addr
& TARGET_PAGE_MASK
;
3049 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3052 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3054 pd
= IO_MEM_UNASSIGNED
;
3056 pd
= p
->phys_offset
;
3059 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3060 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3061 !(pd
& IO_MEM_ROMD
)) {
3064 unsigned long addr1
;
3065 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3067 ptr
= phys_ram_base
+ addr1
;
3068 memcpy(ptr
, buf
, l
);
3078 target_phys_addr_t addr
;
3079 target_phys_addr_t len
;
3082 static BounceBuffer bounce
;
3084 typedef struct MapClient
{
3086 void (*callback
)(void *opaque
);
3087 LIST_ENTRY(MapClient
) link
;
3090 static LIST_HEAD(map_client_list
, MapClient
) map_client_list
3091 = LIST_HEAD_INITIALIZER(map_client_list
);
3093 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3095 MapClient
*client
= qemu_malloc(sizeof(*client
));
3097 client
->opaque
= opaque
;
3098 client
->callback
= callback
;
3099 LIST_INSERT_HEAD(&map_client_list
, client
, link
);
3103 void cpu_unregister_map_client(void *_client
)
3105 MapClient
*client
= (MapClient
*)_client
;
3107 LIST_REMOVE(client
, link
);
3110 static void cpu_notify_map_clients(void)
3114 while (!LIST_EMPTY(&map_client_list
)) {
3115 client
= LIST_FIRST(&map_client_list
);
3116 client
->callback(client
->opaque
);
3117 LIST_REMOVE(client
, link
);
3121 /* Map a physical memory region into a host virtual address.
3122 * May map a subset of the requested range, given by and returned in *plen.
3123 * May return NULL if resources needed to perform the mapping are exhausted.
3124 * Use only for reads OR writes - not for read-modify-write operations.
3125 * Use cpu_register_map_client() to know when retrying the map operation is
3126 * likely to succeed.
3128 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3129 target_phys_addr_t
*plen
,
3132 target_phys_addr_t len
= *plen
;
3133 target_phys_addr_t done
= 0;
3135 uint8_t *ret
= NULL
;
3137 target_phys_addr_t page
;
3140 unsigned long addr1
;
3143 page
= addr
& TARGET_PAGE_MASK
;
3144 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3147 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3149 pd
= IO_MEM_UNASSIGNED
;
3151 pd
= p
->phys_offset
;
3154 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3155 if (done
|| bounce
.buffer
) {
3158 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3162 cpu_physical_memory_rw(addr
, bounce
.buffer
, l
, 0);
3164 ptr
= bounce
.buffer
;
3166 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3167 ptr
= phys_ram_base
+ addr1
;
3171 } else if (ret
+ done
!= ptr
) {
3183 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3184 * Will also mark the memory as dirty if is_write == 1. access_len gives
3185 * the amount of memory that was actually read or written by the caller.
3187 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3188 int is_write
, target_phys_addr_t access_len
)
3190 if (buffer
!= bounce
.buffer
) {
3192 unsigned long addr1
= (uint8_t *)buffer
- phys_ram_base
;
3193 while (access_len
) {
3195 l
= TARGET_PAGE_SIZE
;
3198 if (!cpu_physical_memory_is_dirty(addr1
)) {
3199 /* invalidate code */
3200 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3202 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3203 (0xff & ~CODE_DIRTY_FLAG
);
3212 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3214 qemu_free(bounce
.buffer
);
3215 bounce
.buffer
= NULL
;
3216 cpu_notify_map_clients();
3219 /* warning: addr must be aligned */
3220 uint32_t ldl_phys(target_phys_addr_t addr
)
3228 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3230 pd
= IO_MEM_UNASSIGNED
;
3232 pd
= p
->phys_offset
;
3235 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3236 !(pd
& IO_MEM_ROMD
)) {
3238 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3240 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3241 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3244 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3245 (addr
& ~TARGET_PAGE_MASK
);
3251 /* warning: addr must be aligned */
3252 uint64_t ldq_phys(target_phys_addr_t addr
)
3260 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3262 pd
= IO_MEM_UNASSIGNED
;
3264 pd
= p
->phys_offset
;
3267 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3268 !(pd
& IO_MEM_ROMD
)) {
3270 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3272 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3273 #ifdef TARGET_WORDS_BIGENDIAN
3274 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
3275 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
3277 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
3278 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
3282 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3283 (addr
& ~TARGET_PAGE_MASK
);
3290 uint32_t ldub_phys(target_phys_addr_t addr
)
3293 cpu_physical_memory_read(addr
, &val
, 1);
3298 uint32_t lduw_phys(target_phys_addr_t addr
)
3301 cpu_physical_memory_read(addr
, (uint8_t *)&val
, 2);
3302 return tswap16(val
);
3305 /* warning: addr must be aligned. The ram page is not masked as dirty
3306 and the code inside is not invalidated. It is useful if the dirty
3307 bits are used to track modified PTEs */
3308 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3315 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3317 pd
= IO_MEM_UNASSIGNED
;
3319 pd
= p
->phys_offset
;
3322 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3323 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3325 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3326 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3328 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3329 ptr
= phys_ram_base
+ addr1
;
3332 if (unlikely(in_migration
)) {
3333 if (!cpu_physical_memory_is_dirty(addr1
)) {
3334 /* invalidate code */
3335 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3337 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3338 (0xff & ~CODE_DIRTY_FLAG
);
3344 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3351 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3353 pd
= IO_MEM_UNASSIGNED
;
3355 pd
= p
->phys_offset
;
3358 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3359 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3361 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3362 #ifdef TARGET_WORDS_BIGENDIAN
3363 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
3364 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
3366 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3367 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
3370 ptr
= phys_ram_base
+ (pd
& TARGET_PAGE_MASK
) +
3371 (addr
& ~TARGET_PAGE_MASK
);
3376 /* warning: addr must be aligned */
3377 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3384 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3386 pd
= IO_MEM_UNASSIGNED
;
3388 pd
= p
->phys_offset
;
3391 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3392 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3394 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3395 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
3397 unsigned long addr1
;
3398 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3400 ptr
= phys_ram_base
+ addr1
;
3402 if (!cpu_physical_memory_is_dirty(addr1
)) {
3403 /* invalidate code */
3404 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3406 phys_ram_dirty
[addr1
>> TARGET_PAGE_BITS
] |=
3407 (0xff & ~CODE_DIRTY_FLAG
);
3413 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3416 cpu_physical_memory_write(addr
, &v
, 1);
3420 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3422 uint16_t v
= tswap16(val
);
3423 cpu_physical_memory_write(addr
, (const uint8_t *)&v
, 2);
3427 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3430 cpu_physical_memory_write(addr
, (const uint8_t *)&val
, 8);
3435 /* virtual memory access for debug */
3436 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3437 uint8_t *buf
, int len
, int is_write
)
3440 target_phys_addr_t phys_addr
;
3444 page
= addr
& TARGET_PAGE_MASK
;
3445 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3446 /* if no physical page mapped, return an error */
3447 if (phys_addr
== -1)
3449 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3452 cpu_physical_memory_rw(phys_addr
+ (addr
& ~TARGET_PAGE_MASK
),
3461 /* in deterministic execution mode, instructions doing device I/Os
3462 must be at the end of the TB */
3463 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
3465 TranslationBlock
*tb
;
3467 target_ulong pc
, cs_base
;
3470 tb
= tb_find_pc((unsigned long)retaddr
);
3472 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
3475 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
3476 cpu_restore_state(tb
, env
, (unsigned long)retaddr
, NULL
);
3477 /* Calculate how many instructions had been executed before the fault
3479 n
= n
- env
->icount_decr
.u16
.low
;
3480 /* Generate a new TB ending on the I/O insn. */
3482 /* On MIPS and SH, delay slot instructions can only be restarted if
3483 they were already the first instruction in the TB. If this is not
3484 the first instruction in a TB then re-execute the preceding
3486 #if defined(TARGET_MIPS)
3487 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
3488 env
->active_tc
.PC
-= 4;
3489 env
->icount_decr
.u16
.low
++;
3490 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
3492 #elif defined(TARGET_SH4)
3493 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
3496 env
->icount_decr
.u16
.low
++;
3497 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
3500 /* This should never happen. */
3501 if (n
> CF_COUNT_MASK
)
3502 cpu_abort(env
, "TB too big during recompile");
3504 cflags
= n
| CF_LAST_IO
;
3506 cs_base
= tb
->cs_base
;
3508 tb_phys_invalidate(tb
, -1);
3509 /* FIXME: In theory this could raise an exception. In practice
3510 we have already translated the block once so it's probably ok. */
3511 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
3512 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3513 the first in the TB) then we end up generating a whole new TB and
3514 repeating the fault, which is horribly inefficient.
3515 Better would be to execute just this insn uncached, or generate a
3517 cpu_resume_from_signal(env
, NULL
);
3520 void dump_exec_info(FILE *f
,
3521 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...))
3523 int i
, target_code_size
, max_target_code_size
;
3524 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
3525 TranslationBlock
*tb
;
3527 target_code_size
= 0;
3528 max_target_code_size
= 0;
3530 direct_jmp_count
= 0;
3531 direct_jmp2_count
= 0;
3532 for(i
= 0; i
< nb_tbs
; i
++) {
3534 target_code_size
+= tb
->size
;
3535 if (tb
->size
> max_target_code_size
)
3536 max_target_code_size
= tb
->size
;
3537 if (tb
->page_addr
[1] != -1)
3539 if (tb
->tb_next_offset
[0] != 0xffff) {
3541 if (tb
->tb_next_offset
[1] != 0xffff) {
3542 direct_jmp2_count
++;
3546 /* XXX: avoid using doubles ? */
3547 cpu_fprintf(f
, "Translation buffer state:\n");
3548 cpu_fprintf(f
, "gen code size %ld/%ld\n",
3549 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
3550 cpu_fprintf(f
, "TB count %d/%d\n",
3551 nb_tbs
, code_gen_max_blocks
);
3552 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
3553 nb_tbs
? target_code_size
/ nb_tbs
: 0,
3554 max_target_code_size
);
3555 cpu_fprintf(f
, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3556 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
3557 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
3558 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
3560 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
3561 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3563 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
3565 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
3566 cpu_fprintf(f
, "\nStatistics:\n");
3567 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
3568 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
3569 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
3570 tcg_dump_info(f
, cpu_fprintf
);
3573 #if !defined(CONFIG_USER_ONLY)
3575 #define MMUSUFFIX _cmmu
3576 #define GETPC() NULL
3577 #define env cpu_single_env
3578 #define SOFTMMU_CODE_ACCESS
3581 #include "softmmu_template.h"
3584 #include "softmmu_template.h"
3587 #include "softmmu_template.h"
3590 #include "softmmu_template.h"