 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#define code_gen_section                                \
    __attribute__((aligned (32)))
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
#if defined(CONFIG_USER_ONLY)

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#define P_L1_BITS  P_L1_BITS_REM

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#define V_L1_BITS  V_L1_BITS_REM

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
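
/*
 * Worked example of the sizing above (the concrete numbers are only
 * illustrative and depend on the host/target configuration): with
 * TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 42-bit physical address space,
 * 42 - 12 = 30 bits must be translated by the tree.  30 % 10 == 0, which is
 * below the "silly small" threshold, so P_L1_BITS = 0 + 10 = 10 (a 1024-entry
 * L1 table), P_L1_SHIFT = 42 - 12 - 10 = 20, and the remaining 20 bits are
 * resolved by two L2 levels of 10 bits each.  The L1 index therefore comes
 * from the top of the page frame number and each deeper level consumes
 * L2_BITS more bits.
 */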
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
static const char *logfilename = "qemu.log";
static const char *logfilename = "/tmp/qemu.log";
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
static void map_exec(void *addr, long size)
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
static void map_exec(void *addr, long size)
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
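
/*
 * A minimal standalone illustration of the page rounding done above (a
 * hedged sketch only; round_to_host_pages is not a helper in this file).
 * The range [addr, addr + size) is widened so that both ends fall on host
 * page boundaries before mprotect() is applied:
 *
 *     static void round_to_host_pages(void *addr, long size,
 *                                     unsigned long *first, unsigned long *last)
 *     {
 *         unsigned long page_size = getpagesize();
 *         *first = (unsigned long)addr & ~(page_size - 1);
 *         *last  = ((unsigned long)addr + size + page_size - 1) & ~(page_size - 1);
 *     }
 *
 * For example, with 4 KiB pages, addr = 0x1234 and size = 0x10 yields the
 * range [0x1000, 0x2000), i.e. exactly one page is made executable.
 */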
static void page_init(void)
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
    SYSTEM_INFO system_info;

    GetSystemInfo(&system_info);
    qemu_real_host_page_size = system_info.dwPageSize;
    qemu_real_host_page_size = getpagesize();
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);

    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
        unsigned long startaddr, endaddr;

        n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

        if (n == 2 && h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));

static inline PageDesc *page_find(tb_page_addr_t index)
    return page_find_alloc(index, 0);
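
/*
 * A self-contained sketch of the lookup scheme used by page_find_alloc()
 * and phys_page_find_alloc() below (illustrative only; the names toy_map,
 * TOY_* and toy_find are assumptions, not part of this file).  The page
 * index is consumed a fixed number of bits per level, top level first;
 * intermediate levels hold pointers, the bottom level holds the payload:
 *
 *     #include <stdlib.h>
 *
 *     #define TOY_L2_BITS 10
 *     #define TOY_L2_SIZE (1 << TOY_L2_BITS)
 *     #define TOY_LEVELS  3
 *
 *     static void *toy_map[TOY_L2_SIZE];        // level 1, always allocated
 *
 *     static void *toy_find(unsigned long index, int alloc)
 *     {
 *         void **lp = &toy_map[(index >> (2 * TOY_L2_BITS)) & (TOY_L2_SIZE - 1)];
 *         int level;
 *
 *         for (level = TOY_LEVELS - 2; level >= 0; level--) {
 *             if (*lp == NULL) {
 *                 if (!alloc) {
 *                     return NULL;
 *                 }
 *                 *lp = calloc(TOY_L2_SIZE, sizeof(void *));
 *             }
 *             lp = (void **)*lp + ((index >> (level * TOY_L2_BITS)) & (TOY_L2_SIZE - 1));
 *         }
 *         return lp;    // slot for the bottom-level descriptor
 *     }
 *
 * page_find() is simply the non-allocating flavour of the same walk, which
 * is why a miss there returns NULL instead of growing the tree.
 */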
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;

    return pd + (index & (L2_SIZE - 1));

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
    return phys_page_find_alloc(index, 0);
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
static void code_gen_alloc(unsigned long tb_size)
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        start = (void *)0x90000000UL;
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
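
/*
 * A minimal, self-contained version of the allocation strategy above
 * (illustrative sketch; alloc_jit_buffer and JIT_BUFFER_HINT are assumed
 * names, not part of this file).  The buffer is requested read/write/exec
 * in one mmap() call, with an address hint so that generated code stays
 * within direct-branch range of the prologue on hosts that need it:
 *
 *     #include <stdint.h>
 *     #include <stdio.h>
 *     #include <sys/mman.h>
 *
 *     #define JIT_BUFFER_HINT ((void *)0x60000000UL)
 *
 *     static uint8_t *alloc_jit_buffer(size_t size)
 *     {
 *         void *buf = mmap(JIT_BUFFER_HINT, size,
 *                          PROT_READ | PROT_WRITE | PROT_EXEC,
 *                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *         if (buf == MAP_FAILED) {
 *             perror("mmap");
 *             return NULL;
 *         }
 *         return buf;
 *     }
 *
 * The real code additionally clamps the size per host architecture and
 * falls back to qemu_malloc() + map_exec() when mmap() is unavailable.
 */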
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
CPUState *qemu_get_cpu(int cpu)
    CPUState *env = first_cpu;

        if (env->cpu_index == cpu)
void cpu_exec_init(CPUState *env)
#if defined(CONFIG_USER_ONLY)
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#if defined(CONFIG_USER_ONLY)
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)

void tb_free(TranslationBlock *tb)
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
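
/*
 * tb_alloc()/tb_free() behave like a bump allocator: blocks are carved
 * sequentially out of the code buffer, and only the most recently
 * generated block can be given back.  A standalone sketch of the same
 * discipline (illustrative only; bump_state, bump_alloc and bump_free_last
 * are assumed names):
 *
 *     #include <stddef.h>
 *     #include <stdint.h>
 *
 *     struct bump_state {
 *         uint8_t *base, *ptr, *limit;
 *         uint8_t *last;                 // start of the last allocation
 *     };
 *
 *     static void *bump_alloc(struct bump_state *s, size_t size)
 *     {
 *         if (s->ptr + size > s->limit) {
 *             return NULL;               // caller must flush everything
 *         }
 *         s->last = s->ptr;
 *         s->ptr += size;
 *         return s->last;
 *     }
 *
 *     static void bump_free_last(struct bump_state *s, void *p)
 *     {
 *         if (p == s->last) {            // only the newest block is reclaimable
 *             s->ptr = s->last;
 *         }
 *     }
 *
 * Anything older than the newest block is only reclaimed by a full
 * tb_flush(), which resets code_gen_ptr to the start of the buffer.
 */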
static inline void invalidate_page_bitmap(PageDesc *p)
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    p->code_write_count = 0;
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);

static void page_flush_tb(void)
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
    TranslationBlock *tb;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
    TranslationBlock *tb1;
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
    TranslationBlock *tb1;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
            *ptb = tb1->page_next[n1];
        ptb = &tb1->page_next[n1];

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
    TranslationBlock *tb1, **ptb;

    ptb = &tb->jmp_next[n];
        /* find tb(n) in circular list */
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                ptb = &tb1->jmp_first;
                ptb = &tb1->jmp_next[n1];
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
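
/*
 * The jump lists above store a TranslationBlock pointer with the 2-bit
 * jump-slot index packed into its low bits (TBs are at least 4-byte
 * aligned, so those bits are otherwise always zero); "& ~3" recovers the
 * pointer and "& 3" the slot.  A hedged standalone sketch of the encoding
 * (tb_tag/tb_untag/tb_slot are illustrative names, not part of this file):
 *
 *     #include <stdint.h>
 *
 *     static inline void *tb_tag(void *tb, unsigned slot)   // slot in [0, 3]
 *     {
 *         return (void *)((uintptr_t)tb | (slot & 3));
 *     }
 *
 *     static inline void *tb_untag(void *tagged)
 *     {
 *         return (void *)((uintptr_t)tagged & ~(uintptr_t)3);
 *     }
 *
 *     static inline unsigned tb_slot(void *tagged)
 *     {
 *         return (uintptr_t)tagged & 3;
 *     }
 *
 * The value (tb | 2) used as jmp_first marks the end of the circular list:
 * slot 2 never corresponds to a real jump entry, so the walk terminates.
 */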
static inline void set_bits(uint8_t *tab, int start, int len)
        mask = 0xff << (start & 7);
        if ((start & ~7) == (end & ~7)) {
                mask &= ~(0xff << (end & 7));
            start = (start + 8) & ~7;
        while (start < end1) {
            mask = ~(0xff << (end & 7));
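
/*
 * Only fragments of set_bits() survive above, so here is a hedged,
 * self-contained reimplementation of the same idea (an assumption about
 * the details, not a verbatim copy): mark bits [start, start + len) in a
 * byte-addressed bitmap, handling partial bytes bit by bit and whole bytes
 * in one store:
 *
 *     #include <stdint.h>
 *
 *     static void set_bits_sketch(uint8_t *tab, int start, int len)
 *     {
 *         int end = start + len;
 *
 *         while (start < end) {
 *             if ((start & 7) == 0 && end - start >= 8) {
 *                 tab[start >> 3] = 0xff;          // whole byte at once
 *                 start += 8;
 *             } else {
 *                 tab[start >> 3] |= 1 << (start & 7);
 *                 start++;
 *             }
 *         }
 *     }
 *
 * build_page_bitmap() below uses this to record which byte ranges of a
 * guest page are covered by translated code, so that unrelated writes to
 * the same page do not force an invalidation.
 */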
static void build_page_bitmap(PageDesc *p)
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
    TranslationBlock *tb;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;

    phys_pc = get_page_addr_code(env, pc);
        /* flush must be done */
        /* cannot fail at this point */
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    tc_ptr = code_gen_ptr;
    tb->cs_base = cs_base;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    tb_link_page(tb, phys_pc, phys_page2);
1008 /* invalidate all TBs which intersect with the target physical page
1009 starting in range [start;end[. NOTE: start and end must refer to
1010 the same physical page. 'is_cpu_write_access' should be true if called
1011 from a real cpu write access: the virtual CPU will exit the current
1012 TB if code is modified inside this TB. */
1013 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1014 int is_cpu_write_access
)
1016 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1017 CPUState
*env
= cpu_single_env
;
1018 tb_page_addr_t tb_start
, tb_end
;
1021 #ifdef TARGET_HAS_PRECISE_SMC
1022 int current_tb_not_found
= is_cpu_write_access
;
1023 TranslationBlock
*current_tb
= NULL
;
1024 int current_tb_modified
= 0;
1025 target_ulong current_pc
= 0;
1026 target_ulong current_cs_base
= 0;
1027 int current_flags
= 0;
1028 #endif /* TARGET_HAS_PRECISE_SMC */
1030 p
= page_find(start
>> TARGET_PAGE_BITS
);
1033 if (!p
->code_bitmap
&&
1034 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1035 is_cpu_write_access
) {
1036 /* build code bitmap */
1037 build_page_bitmap(p
);
1040 /* we remove all the TBs in the range [start, end[ */
1041 /* XXX: see if in some cases it could be faster to invalidate all the code */
1043 while (tb
!= NULL
) {
1045 tb
= (TranslationBlock
*)((long)tb
& ~3);
1046 tb_next
= tb
->page_next
[n
];
1047 /* NOTE: this is subtle as a TB may span two physical pages */
1049 /* NOTE: tb_end may be after the end of the page, but
1050 it is not a problem */
1051 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1052 tb_end
= tb_start
+ tb
->size
;
1054 tb_start
= tb
->page_addr
[1];
1055 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1057 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1058 #ifdef TARGET_HAS_PRECISE_SMC
1059 if (current_tb_not_found
) {
1060 current_tb_not_found
= 0;
1062 if (env
->mem_io_pc
) {
1063 /* now we have a real cpu fault */
1064 current_tb
= tb_find_pc(env
->mem_io_pc
);
1067 if (current_tb
== tb
&&
1068 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1069 /* If we are modifying the current TB, we must stop
1070 its execution. We could be more precise by checking
1071 that the modification is after the current PC, but it
1072 would require a specialized function to partially
1073 restore the CPU state */
1075 current_tb_modified
= 1;
1076 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1077 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1080 #endif /* TARGET_HAS_PRECISE_SMC */
1081 /* we need to do that to handle the case where a signal
1082 occurs while doing tb_phys_invalidate() */
1085 saved_tb
= env
->current_tb
;
1086 env
->current_tb
= NULL
;
1088 tb_phys_invalidate(tb
, -1);
1090 env
->current_tb
= saved_tb
;
1091 if (env
->interrupt_request
&& env
->current_tb
)
1092 cpu_interrupt(env
, env
->interrupt_request
);
1097 #if !defined(CONFIG_USER_ONLY)
1098 /* if no code remaining, no need to continue to use slow writes */
1100 invalidate_page_bitmap(p
);
1101 if (is_cpu_write_access
) {
1102 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (current_tb_modified
) {
1108 /* we generate a block containing just the instruction
1109 modifying the memory. It will ensure that it cannot modify
1111 env
->current_tb
= NULL
;
1112 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1113 cpu_resume_from_signal(env
, NULL
);
1118 /* len must be <= 8 and start must be a multiple of len */
1119 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1125 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1126 cpu_single_env
->mem_io_vaddr
, len
,
1127 cpu_single_env
->eip
,
1128 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1131 p
= page_find(start
>> TARGET_PAGE_BITS
);
1134 if (p
->code_bitmap
) {
1135 offset
= start
& ~TARGET_PAGE_MASK
;
1136 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1137 if (b
& ((1 << len
) - 1))
1141 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1145 #if !defined(CONFIG_SOFTMMU)
1146 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1147 unsigned long pc
, void *puc
)
1149 TranslationBlock
*tb
;
1152 #ifdef TARGET_HAS_PRECISE_SMC
1153 TranslationBlock
*current_tb
= NULL
;
1154 CPUState
*env
= cpu_single_env
;
1155 int current_tb_modified
= 0;
1156 target_ulong current_pc
= 0;
1157 target_ulong current_cs_base
= 0;
1158 int current_flags
= 0;
1161 addr
&= TARGET_PAGE_MASK
;
1162 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1166 #ifdef TARGET_HAS_PRECISE_SMC
1167 if (tb
&& pc
!= 0) {
1168 current_tb
= tb_find_pc(pc
);
1171 while (tb
!= NULL
) {
1173 tb
= (TranslationBlock
*)((long)tb
& ~3);
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (current_tb
== tb
&&
1176 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1177 /* If we are modifying the current TB, we must stop
1178 its execution. We could be more precise by checking
1179 that the modification is after the current PC, but it
1180 would require a specialized function to partially
1181 restore the CPU state */
1183 current_tb_modified
= 1;
1184 cpu_restore_state(current_tb
, env
, pc
);
1185 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1188 #endif /* TARGET_HAS_PRECISE_SMC */
1189 tb_phys_invalidate(tb
, addr
);
1190 tb
= tb
->page_next
[n
];
1193 #ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_modified
) {
1195 /* we generate a block containing just the instruction
1196 modifying the memory. It will ensure that it cannot modify
1198 env
->current_tb
= NULL
;
1199 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1200 cpu_resume_from_signal(env
, puc
);
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            p2->flags &= ~PAGE_WRITE;
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);

#endif /* TARGET_HAS_SMC */
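
/*
 * The self-modifying-code protection above boils down to: once a page
 * holds translated code, take PAGE_WRITE away from the host mapping so
 * that any guest write faults and can invalidate the affected TBs first.
 * A hedged, standalone sketch of that pattern (protect_code_page is an
 * illustrative name; real handling also tracks the original flags):
 *
 *     #include <stddef.h>
 *     #include <sys/mman.h>
 *
 *     static int protect_code_page(void *host_page, size_t host_page_size)
 *     {
 *         // read + exec only; the SIGSEGV handler re-enables writing
 *         // after running the invalidation path.
 *         return mprotect(host_page, host_page_size, PROT_READ | PROT_EXEC);
 *     }
 *
 * In system mode the same effect is obtained through the softmmu TLB
 * (tlb_protect_code() marks the page so writes take the slow path) rather
 * than through host page protections.
 */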
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
    int m_min, m_max, m;
    TranslationBlock *tb;

    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
    /* binary search (cf Knuth) */
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        v = (unsigned long)tb->tc_ptr;
        else if (tc_ptr < v) {
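
/*
 * The lookup above is a plain binary search over the tbs[] array, which is
 * sorted by tc_ptr because blocks are carved sequentially out of the code
 * buffer.  A hedged standalone equivalent (find_block is an illustrative
 * name; it assumes the same "sorted by start address" invariant):
 *
 *     struct block { unsigned long start; unsigned long size; };
 *
 *     static struct block *find_block(struct block *blocks, int nb_blocks,
 *                                     unsigned long addr)
 *     {
 *         int lo = 0, hi = nb_blocks - 1;
 *
 *         while (lo <= hi) {
 *             int mid = (lo + hi) >> 1;
 *             if (addr < blocks[mid].start) {
 *                 hi = mid - 1;
 *             } else if (addr >= blocks[mid].start + blocks[mid].size) {
 *                 lo = mid + 1;
 *             } else {
 *                 return &blocks[mid];   // start <= addr < start + size
 *             }
 *         }
 *         return NULL;
 *     }
 *
 * tb_find_pc() is used when a fault occurs inside generated code: the host
 * PC is mapped back to the TB that produced it so the guest state can be
 * reconstructed.
 */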
1335 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1337 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1339 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1342 tb1
= tb
->jmp_next
[n
];
1344 /* find head of list */
1347 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1350 tb1
= tb1
->jmp_next
[n1
];
1352 /* we are now sure now that tb jumps to tb1 */
1355 /* remove tb from the jmp_first list */
1356 ptb
= &tb_next
->jmp_first
;
1360 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1361 if (n1
== n
&& tb1
== tb
)
1363 ptb
= &tb1
->jmp_next
[n1
];
1365 *ptb
= tb
->jmp_next
[n
];
1366 tb
->jmp_next
[n
] = NULL
;
1368 /* suppress the jump to next tb in generated code */
1369 tb_reset_jump(tb
, n
);
1371 /* suppress jumps in the tb on which we could have jumped */
1372 tb_reset_jump_recursive(tb_next
);
1376 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1378 tb_reset_jump_recursive2(tb
, 0);
1379 tb_reset_jump_recursive2(tb
, 1);
1382 #if defined(TARGET_HAS_ICE)
1383 #if defined(CONFIG_USER_ONLY)
1384 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1386 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1389 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1391 target_phys_addr_t addr
;
1393 ram_addr_t ram_addr
;
1396 addr
= cpu_get_phys_page_debug(env
, pc
);
1397 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1399 pd
= IO_MEM_UNASSIGNED
;
1401 pd
= p
->phys_offset
;
1403 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1404 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1407 #endif /* TARGET_HAS_ICE */
1409 #if defined(CONFIG_USER_ONLY)
1410 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1415 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1416 int flags
, CPUWatchpoint
**watchpoint
)
1421 /* Add a watchpoint. */
1422 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1423 int flags
, CPUWatchpoint
**watchpoint
)
1425 target_ulong len_mask
= ~(len
- 1);
1428 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1429 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1430 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1431 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1434 wp
= qemu_malloc(sizeof(*wp
));
1437 wp
->len_mask
= len_mask
;
1440 /* keep all GDB-injected watchpoints in front */
1442 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1444 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1446 tlb_flush_page(env
, addr
);
1453 /* Remove a specific watchpoint. */
1454 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1457 target_ulong len_mask
= ~(len
- 1);
1460 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1461 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1462 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1463 cpu_watchpoint_remove_by_ref(env
, wp
);
1470 /* Remove a specific watchpoint by reference. */
1471 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1473 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1475 tlb_flush_page(env
, watchpoint
->vaddr
);
1477 qemu_free(watchpoint
);
1480 /* Remove all matching watchpoints. */
1481 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1483 CPUWatchpoint
*wp
, *next
;
1485 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1486 if (wp
->flags
& mask
)
1487 cpu_watchpoint_remove_by_ref(env
, wp
);
1492 /* Add a breakpoint. */
1493 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1494 CPUBreakpoint
**breakpoint
)
1496 #if defined(TARGET_HAS_ICE)
1499 bp
= qemu_malloc(sizeof(*bp
));
1504 /* keep all GDB-injected breakpoints in front */
1506 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1508 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1510 breakpoint_invalidate(env
, pc
);
1520 /* Remove a specific breakpoint. */
1521 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1523 #if defined(TARGET_HAS_ICE)
1526 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1527 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1528 cpu_breakpoint_remove_by_ref(env
, bp
);
1538 /* Remove a specific breakpoint by reference. */
1539 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1541 #if defined(TARGET_HAS_ICE)
1542 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1544 breakpoint_invalidate(env
, breakpoint
->pc
);
1546 qemu_free(breakpoint
);
1550 /* Remove all matching breakpoints. */
1551 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1553 #if defined(TARGET_HAS_ICE)
1554 CPUBreakpoint
*bp
, *next
;
1556 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1557 if (bp
->flags
& mask
)
1558 cpu_breakpoint_remove_by_ref(env
, bp
);
1563 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1564 CPU loop after each instruction */
1565 void cpu_single_step(CPUState
*env
, int enabled
)
1567 #if defined(TARGET_HAS_ICE)
1568 if (env
->singlestep_enabled
!= enabled
) {
1569 env
->singlestep_enabled
= enabled
;
1571 kvm_update_guest_debug(env
, 0);
1573 /* must flush all the translated code to avoid inconsistencies */
1574 /* XXX: only flush what is necessary */
1581 /* enable or disable low levels log */
1582 void cpu_set_log(int log_flags
)
1584 loglevel
= log_flags
;
1585 if (loglevel
&& !logfile
) {
1586 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1588 perror(logfilename
);
1591 #if !defined(CONFIG_SOFTMMU)
1592 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1594 static char logfile_buf
[4096];
1595 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1597 #elif !defined(_WIN32)
1598 /* Win32 doesn't support line-buffering and requires size >= 2 */
1599 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1603 if (!loglevel
&& logfile
) {
1609 void cpu_set_log_filename(const char *filename
)
1611 logfilename
= strdup(filename
);
1616 cpu_set_log(loglevel
);
1619 static void cpu_unlink_tb(CPUState
*env
)
1621 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1622 problem and hope the cpu will stop of its own accord. For userspace
1623 emulation this often isn't actually as bad as it sounds. Often
1624 signals are used primarily to interrupt blocking syscalls. */
1625 TranslationBlock
*tb
;
1626 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1628 spin_lock(&interrupt_lock
);
1629 tb
= env
->current_tb
;
1630 /* if the cpu is currently executing code, we must unlink it and
1631 all the potentially executing TB */
1633 env
->current_tb
= NULL
;
1634 tb_reset_jump_recursive(tb
);
1636 spin_unlock(&interrupt_lock
);
1639 #ifndef CONFIG_USER_ONLY
1640 /* mask must never be zero, except for A20 change call */
1641 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1645 old_mask
= env
->interrupt_request
;
1646 env
->interrupt_request
|= mask
;
1649 * If called from iothread context, wake the target cpu in
1652 if (!qemu_cpu_is_self(env
)) {
1658 env
->icount_decr
.u16
.high
= 0xffff;
1660 && (mask
& ~old_mask
) != 0) {
1661 cpu_abort(env
, "Raised interrupt while not in I/O function");
1668 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1670 #else /* CONFIG_USER_ONLY */
1672 void cpu_interrupt(CPUState
*env
, int mask
)
1674 env
->interrupt_request
|= mask
;
1677 #endif /* CONFIG_USER_ONLY */
1679 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1681 env
->interrupt_request
&= ~mask
;
1684 void cpu_exit(CPUState
*env
)
1686 env
->exit_request
= 1;
1690 const CPULogItem cpu_log_items
[] = {
1691 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1692 "show generated host assembly code for each compiled TB" },
1693 { CPU_LOG_TB_IN_ASM
, "in_asm",
1694 "show target assembly code for each compiled TB" },
1695 { CPU_LOG_TB_OP
, "op",
1696 "show micro ops for each compiled TB" },
1697 { CPU_LOG_TB_OP_OPT
, "op_opt",
1700 "before eflags optimization and "
1702 "after liveness analysis" },
1703 { CPU_LOG_INT
, "int",
1704 "show interrupts/exceptions in short format" },
1705 { CPU_LOG_EXEC
, "exec",
1706 "show trace before each executed TB (lots of logs)" },
1707 { CPU_LOG_TB_CPU
, "cpu",
1708 "show CPU state before block translation" },
1710 { CPU_LOG_PCALL
, "pcall",
1711 "show protected mode far calls/returns/exceptions" },
1712 { CPU_LOG_RESET
, "cpu_reset",
1713 "show CPU state before CPU resets" },
1716 { CPU_LOG_IOPORT
, "ioport",
1717 "show all i/o ports accesses" },
1722 #ifndef CONFIG_USER_ONLY
1723 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1724 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1726 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1728 ram_addr_t phys_offset
,
1731 CPUPhysMemoryClient
*client
;
1732 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1733 client
->set_memory(client
, start_addr
, size
, phys_offset
, log_dirty
);
1737 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1738 target_phys_addr_t end
)
1740 CPUPhysMemoryClient
*client
;
1741 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1742 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1749 static int cpu_notify_migration_log(int enable
)
1751 CPUPhysMemoryClient
*client
;
1752 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1753 int r
= client
->migration_log(client
, enable
);
1761 target_phys_addr_t start_addr
;
1763 ram_addr_t phys_offset
;
1766 /* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1767 * address. Each intermediate table provides the next L2_BITs of guest
1768 * physical address space. The number of levels vary based on host and
1769 * guest configuration, making it efficient to build the final guest
1770 * physical address by seeding the L1 offset and shifting and adding in
1771 * each L2 offset as we recurse through them. */
1772 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
, int level
,
1773 void **lp
, target_phys_addr_t addr
,
1774 struct last_map
*map
)
1782 PhysPageDesc
*pd
= *lp
;
1783 addr
<<= L2_BITS
+ TARGET_PAGE_BITS
;
1784 for (i
= 0; i
< L2_SIZE
; ++i
) {
1785 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1786 target_phys_addr_t start_addr
= addr
| i
<< TARGET_PAGE_BITS
;
1789 start_addr
== map
->start_addr
+ map
->size
&&
1790 pd
[i
].phys_offset
== map
->phys_offset
+ map
->size
) {
1792 map
->size
+= TARGET_PAGE_SIZE
;
1794 } else if (map
->size
) {
1795 client
->set_memory(client
, map
->start_addr
,
1796 map
->size
, map
->phys_offset
, false);
1799 map
->start_addr
= start_addr
;
1800 map
->size
= TARGET_PAGE_SIZE
;
1801 map
->phys_offset
= pd
[i
].phys_offset
;
1806 for (i
= 0; i
< L2_SIZE
; ++i
) {
1807 phys_page_for_each_1(client
, level
- 1, pp
+ i
,
1808 (addr
<< L2_BITS
) | i
, map
);
1813 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1816 struct last_map map
= { };
1818 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1819 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1820 l1_phys_map
+ i
, i
, &map
);
1823 client
->set_memory(client
, map
.start_addr
, map
.size
, map
.phys_offset
,
1828 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1830 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1831 phys_page_for_each(client
);
1834 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1836 QLIST_REMOVE(client
, list
);
1840 static int cmp1(const char *s1
, int n
, const char *s2
)
1842 if (strlen(s2
) != n
)
1844 return memcmp(s1
, s2
, n
) == 0;
1847 /* takes a comma separated list of log masks. Return 0 if error. */
1848 int cpu_str_to_log_mask(const char *str
)
1850 const CPULogItem
*item
;
1857 p1
= strchr(p
, ',');
1860 if(cmp1(p
,p1
-p
,"all")) {
1861 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1865 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1866 if (cmp1(p
, p1
- p
, item
->name
))
1880 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1887 fprintf(stderr
, "qemu: fatal: ");
1888 vfprintf(stderr
, fmt
, ap
);
1889 fprintf(stderr
, "\n");
1891 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1893 cpu_dump_state(env
, stderr
, fprintf
, 0);
1895 if (qemu_log_enabled()) {
1896 qemu_log("qemu: fatal: ");
1897 qemu_log_vprintf(fmt
, ap2
);
1900 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1902 log_cpu_state(env
, 0);
1909 #if defined(CONFIG_USER_ONLY)
1911 struct sigaction act
;
1912 sigfillset(&act
.sa_mask
);
1913 act
.sa_handler
= SIG_DFL
;
1914 sigaction(SIGABRT
, &act
, NULL
);
1920 CPUState
*cpu_copy(CPUState
*env
)
1922 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1923 CPUState
*next_cpu
= new_env
->next_cpu
;
1924 int cpu_index
= new_env
->cpu_index
;
1925 #if defined(TARGET_HAS_ICE)
1930 memcpy(new_env
, env
, sizeof(CPUState
));
1932 /* Preserve chaining and index. */
1933 new_env
->next_cpu
= next_cpu
;
1934 new_env
->cpu_index
= cpu_index
;
1936 /* Clone all break/watchpoints.
1937 Note: Once we support ptrace with hw-debug register access, make sure
1938 BP_CPU break/watchpoints are handled correctly on clone. */
1939 QTAILQ_INIT(&env
->breakpoints
);
1940 QTAILQ_INIT(&env
->watchpoints
);
1941 #if defined(TARGET_HAS_ICE)
1942 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1943 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1945 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1946 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1954 #if !defined(CONFIG_USER_ONLY)
1956 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1960 /* Discard jump cache entries for any tb which might potentially
1961 overlap the flushed page. */
1962 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1963 memset (&env
->tb_jmp_cache
[i
], 0,
1964 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1966 i
= tb_jmp_cache_hash_page(addr
);
1967 memset (&env
->tb_jmp_cache
[i
], 0,
1968 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1971 static CPUTLBEntry s_cputlb_empty_entry
= {
1978 /* NOTE: if flush_global is true, also flush global entries (not
1980 void tlb_flush(CPUState
*env
, int flush_global
)
1984 #if defined(DEBUG_TLB)
1985 printf("tlb_flush:\n");
1987 /* must reset current TB so that interrupts cannot modify the
1988 links while we are modifying them */
1989 env
->current_tb
= NULL
;
1991 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1993 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1994 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1998 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
2000 env
->tlb_flush_addr
= -1;
2001 env
->tlb_flush_mask
= 0;
2005 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
2007 if (addr
== (tlb_entry
->addr_read
&
2008 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
2009 addr
== (tlb_entry
->addr_write
&
2010 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
2011 addr
== (tlb_entry
->addr_code
&
2012 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
2013 *tlb_entry
= s_cputlb_empty_entry
;
void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
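
/*
 * tlb_flush_page() only touches the one direct-mapped TLB slot a virtual
 * address can occupy per MMU mode.  A hedged sketch of the index
 * computation it relies on (tlb_index is an illustrative name; the
 * constants mirror CPU_TLB_SIZE/TARGET_PAGE_BITS but are assumptions here):
 *
 *     #define SKETCH_PAGE_BITS 12
 *     #define SKETCH_TLB_SIZE  256          // must be a power of two
 *
 *     static inline unsigned tlb_index(unsigned long vaddr)
 *     {
 *         return (vaddr >> SKETCH_PAGE_BITS) & (SKETCH_TLB_SIZE - 1);
 *     }
 *
 * Because the table is direct-mapped, clearing that single entry in each
 * mmu_idx (plus the jump cache hashed from the same address) is enough to
 * guarantee the next access goes through the fill path again.
 */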
2047 /* update the TLBs so that writes to code in the virtual page 'addr'
2049 static void tlb_protect_code(ram_addr_t ram_addr
)
2051 cpu_physical_memory_reset_dirty(ram_addr
,
2052 ram_addr
+ TARGET_PAGE_SIZE
,
2056 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2057 tested for self modifying code */
2058 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2061 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2064 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2065 unsigned long start
, unsigned long length
)
2068 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2069 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2070 if ((addr
- start
) < length
) {
2071 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2076 /* Note: start and end must be within the same ram block. */
2077 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2081 unsigned long length
, start1
;
2084 start
&= TARGET_PAGE_MASK
;
2085 end
= TARGET_PAGE_ALIGN(end
);
2087 length
= end
- start
;
2090 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2092 /* we modify the TLB cache so that the dirty bit will be set again
2093 when accessing the range */
2094 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
2095 /* Check that we don't span multiple blocks - this breaks the
2096 address comparisons below. */
2097 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2098 != (end
- 1) - start
) {
2102 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2104 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2105 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2106 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2112 int cpu_physical_memory_set_dirty_tracking(int enable
)
2115 in_migration
= enable
;
2116 ret
= cpu_notify_migration_log(!!enable
);
2120 int cpu_physical_memory_get_dirty_tracking(void)
2122 return in_migration
;
2125 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2126 target_phys_addr_t end_addr
)
2130 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2134 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2137 CPUPhysMemoryClient
*client
;
2138 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2139 if (client
->log_start
) {
2140 int r
= client
->log_start(client
, start_addr
, size
);
2149 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2152 CPUPhysMemoryClient
*client
;
2153 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2154 if (client
->log_stop
) {
2155 int r
= client
->log_stop(client
, start_addr
, size
);
2164 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2166 ram_addr_t ram_addr
;
2169 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2170 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2171 + tlb_entry
->addend
);
2172 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2173 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2174 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2179 /* update the TLB according to the current state of the dirty bits */
2180 void cpu_tlb_update_dirty(CPUState
*env
)
2184 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2185 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2186 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2190 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2192 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2193 tlb_entry
->addr_write
= vaddr
;
2196 /* update the TLB corresponding to virtual page vaddr
2197 so that it is no longer dirty */
2198 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2203 vaddr
&= TARGET_PAGE_MASK
;
2204 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2205 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2206 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
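
/*
 * Worked example of the mask extension above (numbers are illustrative and
 * the widening step inside the loop is assumed to shift the mask left):
 * suppose a 2 MB large page was recorded with tlb_flush_addr = 0x00200000
 * and tlb_flush_mask = 0xffe00000, and a second large page is added at
 * vaddr = 0x00900000.  Since 0x00200000 ^ 0x00900000 = 0x00b00000 still has
 * bits inside the mask, the mask keeps widening (0xffc00000, 0xff800000,
 * then 0xff000000, where the overlap disappears), so the recorded region
 * becomes [0x00000000, 0x01000000).  Any tlb_flush_page() that hits this
 * region falls back to a full flush, trading precision for a tiny amount
 * of per-CPU state.
 */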
2232 /* Add a new TLB entry. At most one entry for a given virtual address
2233 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2234 supplied size is only used by tlb_flush_page. */
2235 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2236 target_phys_addr_t paddr
, int prot
,
2237 int mmu_idx
, target_ulong size
)
2242 target_ulong address
;
2243 target_ulong code_address
;
2244 unsigned long addend
;
2247 target_phys_addr_t iotlb
;
2249 assert(size
>= TARGET_PAGE_SIZE
);
2250 if (size
!= TARGET_PAGE_SIZE
) {
2251 tlb_add_large_page(env
, vaddr
, size
);
2253 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2255 pd
= IO_MEM_UNASSIGNED
;
2257 pd
= p
->phys_offset
;
2259 #if defined(DEBUG_TLB)
2260 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2261 " prot=%x idx=%d pd=0x%08lx\n",
2262 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2266 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2267 /* IO memory case (romd handled later) */
2268 address
|= TLB_MMIO
;
2270 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2271 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2273 iotlb
= pd
& TARGET_PAGE_MASK
;
2274 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2275 iotlb
|= IO_MEM_NOTDIRTY
;
2277 iotlb
|= IO_MEM_ROM
;
2279 /* IO handlers are currently passed a physical address.
2280 It would be nice to pass an offset from the base address
2281 of that region. This would avoid having to special case RAM,
2282 and avoid full address decoding in every device.
2283 We can't use the high bits of pd for this because
2284 IO_MEM_ROMD uses these as a ram address. */
2285 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2287 iotlb
+= p
->region_offset
;
2293 code_address
= address
;
2294 /* Make accesses to pages with watchpoints go via the
2295 watchpoint trap routines. */
2296 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2297 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2298 /* Avoid trapping reads of pages with a write breakpoint. */
2299 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2300 iotlb
= io_mem_watch
+ paddr
;
2301 address
|= TLB_MMIO
;
2307 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2308 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2309 te
= &env
->tlb_table
[mmu_idx
][index
];
2310 te
->addend
= addend
- vaddr
;
2311 if (prot
& PAGE_READ
) {
2312 te
->addr_read
= address
;
2317 if (prot
& PAGE_EXEC
) {
2318 te
->addr_code
= code_address
;
2322 if (prot
& PAGE_WRITE
) {
2323 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2324 (pd
& IO_MEM_ROMD
)) {
2325 /* Write access calls the I/O callback. */
2326 te
->addr_write
= address
| TLB_MMIO
;
2327 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2328 !cpu_physical_memory_is_dirty(pd
)) {
2329 te
->addr_write
= address
| TLB_NOTDIRTY
;
2331 te
->addr_write
= address
;
2334 te
->addr_write
= -1;
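
/*
 * Each TLB entry set up above stores "addend = host_base - guest_vaddr",
 * so a hit turns guest virtual addresses into host pointers with one add.
 * A hedged sketch of the fast path the generated code effectively performs
 * (guest_to_host and tlb_entry_sketch are illustrative names, not part of
 * this file):
 *
 *     struct tlb_entry_sketch {
 *         unsigned long addr_read;     // tagged guest page address
 *         unsigned long addend;        // host_page - guest_page
 *     };
 *
 *     static inline void *guest_to_host(struct tlb_entry_sketch *te,
 *                                       unsigned long vaddr)
 *     {
 *         return (void *)(vaddr + te->addend);
 *     }
 *
 * Entries that need special handling (MMIO, watchpoints, not-dirty RAM)
 * have TLB_MMIO/TLB_NOTDIRTY bits set in the tagged address instead, which
 * makes the fast-path comparison miss and sends the access down the slow
 * path.
 */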
2340 void tlb_flush(CPUState
*env
, int flush_global
)
2344 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2349 * Walks guest process memory "regions" one by one
2350 * and calls callback function 'fn' for each region.
2353 struct walk_memory_regions_data
2355 walk_memory_regions_fn fn
;
2357 unsigned long start
;
2361 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2362 abi_ulong end
, int new_prot
)
2364 if (data
->start
!= -1ul) {
2365 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2371 data
->start
= (new_prot
? end
: -1ul);
2372 data
->prot
= new_prot
;
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
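/* Hedged usage sketch (not from this file): user-mode mmap/mprotect emulation
 * is expected to record the new protection of a guest range with
 * page_set_flags().  The flag constants are real; the surrounding function,
 * and the direct use of PROT_* from <sys/mman.h>, are illustrative only.
 */
#if 0
static void example_after_guest_mmap(abi_ulong start, abi_ulong len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;

    /* PAGE_WRITE_ORG is added automatically when PAGE_WRITE is set. */
    page_set_flags(start, start + len, flags);
}
#endif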
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
2625 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2628 if (addr > start_addr) \
2631 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2632 if (start_addr2 > 0) \
2636 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2637 end_addr2 = TARGET_PAGE_SIZE - 1; \
2639 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2640 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2645 /* register physical memory.
2646 For RAM, 'size' must be a multiple of the target page size.
2647 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2648 io memory page. The address used when calling the IO function is
2649 the offset from the start of the region, plus region_offset. Both
2650 start_addr and region_offset are rounded down to a page boundary
2651 before calculating this offset. This should not be a problem unless
2652 the low bits of start_addr and region_offset differ. */
2653 void cpu_register_physical_memory_log(target_phys_addr_t start_addr
,
2655 ram_addr_t phys_offset
,
2656 ram_addr_t region_offset
,
2659 target_phys_addr_t addr
, end_addr
;
2662 ram_addr_t orig_size
= size
;
2666 cpu_notify_set_memory(start_addr
, size
, phys_offset
, log_dirty
);
2668 if (phys_offset
== IO_MEM_UNASSIGNED
) {
2669 region_offset
= start_addr
;
2671 region_offset
&= TARGET_PAGE_MASK
;
2672 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2673 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2677 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
2678 if (p
&& p
->phys_offset
!= IO_MEM_UNASSIGNED
) {
2679 ram_addr_t orig_memory
= p
->phys_offset
;
2680 target_phys_addr_t start_addr2
, end_addr2
;
2681 int need_subpage
= 0;
2683 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2686 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2687 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2688 &p
->phys_offset
, orig_memory
,
2691 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2694 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2696 p
->region_offset
= 0;
2698 p
->phys_offset
= phys_offset
;
2699 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2700 (phys_offset
& IO_MEM_ROMD
))
2701 phys_offset
+= TARGET_PAGE_SIZE
;
2704 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2705 p
->phys_offset
= phys_offset
;
2706 p
->region_offset
= region_offset
;
2707 if ((phys_offset
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
||
2708 (phys_offset
& IO_MEM_ROMD
)) {
2709 phys_offset
+= TARGET_PAGE_SIZE
;
2711 target_phys_addr_t start_addr2
, end_addr2
;
2712 int need_subpage
= 0;
2714 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2715 end_addr2
, need_subpage
);
2718 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2719 &p
->phys_offset
, IO_MEM_UNASSIGNED
,
2720 addr
& TARGET_PAGE_MASK
);
2721 subpage_register(subpage
, start_addr2
, end_addr2
,
2722 phys_offset
, region_offset
);
2723 p
->region_offset
= 0;
2727 region_offset
+= TARGET_PAGE_SIZE
;
2728 addr
+= TARGET_PAGE_SIZE
;
2729 } while (addr
!= end_addr
);
2731 /* since each CPU stores ram addresses in its TLB cache, we must
2732 reset the modified entries */
2734 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
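/* Hedged usage sketch: how board or device code of this era typically feeds
 * cpu_register_physical_memory_log() above, via the cpu_register_physical_memory()
 * wrapper.  The base addresses, sizes, and the example_mmio_read/write handler
 * tables are made up for illustration.
 */
#if 0
static void example_map_board_memory(void)
{
    ram_addr_t ram_off;
    int mmio_idx;

    /* RAM: phys_offset is a ram_addr_t from qemu_ram_alloc(); its low bits
       stay IO_MEM_RAM so accesses go straight to host memory. */
    ram_off = qemu_ram_alloc(NULL, "board.ram", 0x01000000);
    cpu_register_physical_memory(0x00000000, 0x01000000, ram_off | IO_MEM_RAM);

    /* MMIO: phys_offset carries an io-memory index from
       cpu_register_io_memory(), so the low bits select a handler table
       entry instead of RAM. */
    mmio_idx = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                      NULL, DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_idx);
}
#endif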
2768 #if defined(__linux__) && !defined(TARGET_S390X)
2770 #include <sys/vfs.h>
2772 #define HUGETLBFS_MAGIC 0x958458f6
2774 static long gethugepagesize(const char *path
)
2780 ret
= statfs(path
, &fs
);
2781 } while (ret
!= 0 && errno
== EINTR
);
2788 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2789 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2794 static void *file_ram_alloc(RAMBlock
*block
,
2804 unsigned long hpagesize
;
2806 hpagesize
= gethugepagesize(path
);
2811 if (memory
< hpagesize
) {
2815 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2816 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2820 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2824 fd
= mkstemp(filename
);
2826 perror("unable to create backing store for hugepages");
2833 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2836 * ftruncate is not supported by hugetlbfs in older
2837 * hosts, so don't bother bailing out on errors.
2838 * If anything goes wrong with it under other filesystems,
2841 if (ftruncate(fd
, memory
))
2842 perror("ftruncate");
2845 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2846 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2847 * to sidestep this quirk.
2849 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2850 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2852 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2854 if (area
== MAP_FAILED
) {
2855 perror("file_ram_alloc: can't mmap RAM pages");
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
2901 ram_addr_t
qemu_ram_alloc_from_ptr(DeviceState
*dev
, const char *name
,
2902 ram_addr_t size
, void *host
)
2904 RAMBlock
*new_block
, *block
;
2906 size
= TARGET_PAGE_ALIGN(size
);
2907 new_block
= qemu_mallocz(sizeof(*new_block
));
2909 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2910 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2912 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2916 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2918 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2919 if (!strcmp(block
->idstr
, new_block
->idstr
)) {
2920 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2926 new_block
->offset
= find_ram_offset(size
);
2928 new_block
->host
= host
;
2929 new_block
->flags
|= RAM_PREALLOC_MASK
;
2932 #if defined (__linux__) && !defined(TARGET_S390X)
2933 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2934 if (!new_block
->host
) {
2935 new_block
->host
= qemu_vmalloc(size
);
2936 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2939 fprintf(stderr
, "-mem-path option unsupported\n");
2943 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
    /* S390 KVM requires the topmost vma of the RAM to be smaller than
       a system-defined value, which is at least 256GB.  Larger systems
       have larger values.  We put the guest between the end of the data
       segment (system break) and this value.  We use 32GB as a base to
       have enough room for the system break to grow. */
2949 new_block
->host
= mmap((void*)0x800000000, size
,
2950 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2951 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2952 if (new_block
->host
== MAP_FAILED
) {
2953 fprintf(stderr
, "Allocating RAM failed\n");
2957 if (xen_mapcache_enabled()) {
2958 xen_ram_alloc(new_block
->offset
, size
);
2960 new_block
->host
= qemu_vmalloc(size
);
2963 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2966 new_block
->length
= size
;
2968 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2970 ram_list
.phys_dirty
= qemu_realloc(ram_list
.phys_dirty
,
2971 last_ram_offset() >> TARGET_PAGE_BITS
);
2972 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2973 0xff, size
>> TARGET_PAGE_BITS
);
2976 kvm_setup_guest_memory(new_block
->host
, size
);
2978 return new_block
->offset
;
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
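/* Hedged usage sketch: a device model that wants QEMU to manage the backing
 * storage calls qemu_ram_alloc(); one that already owns suitably sized host
 * memory can hand it in via qemu_ram_alloc_from_ptr().  The DeviceState
 * argument only influences the "<bus path>/<name>" idstr used to match RAM
 * blocks (e.g. across migration).  The name, size, and wrapper function are
 * illustrative.
 */
#if 0
static ram_addr_t example_alloc_vram(DeviceState *dev, void *host_mem)
{
    if (host_mem) {
        return qemu_ram_alloc_from_ptr(dev, "example.vram", 0x00400000,
                                       host_mem);
    }
    return qemu_ram_alloc(dev, "example.vram", 0x00400000);
}
#endif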
2986 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2990 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2991 if (addr
== block
->offset
) {
2992 QLIST_REMOVE(block
, next
);
2999 void qemu_ram_free(ram_addr_t addr
)
3003 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3004 if (addr
== block
->offset
) {
3005 QLIST_REMOVE(block
, next
);
3006 if (block
->flags
& RAM_PREALLOC_MASK
) {
3008 } else if (mem_path
) {
3009 #if defined (__linux__) && !defined(TARGET_S390X)
3011 munmap(block
->host
, block
->length
);
3014 qemu_vfree(block
->host
);
3020 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3021 munmap(block
->host
, block
->length
);
3023 if (xen_mapcache_enabled()) {
3024 qemu_invalidate_entry(block
->host
);
3026 qemu_vfree(block
->host
);
3038 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
3045 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3046 offset
= addr
- block
->offset
;
3047 if (offset
< block
->length
) {
3048 vaddr
= block
->host
+ offset
;
3049 if (block
->flags
& RAM_PREALLOC_MASK
) {
3053 munmap(vaddr
, length
);
3055 #if defined(__linux__) && !defined(TARGET_S390X)
3058 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
3061 flags
|= MAP_PRIVATE
;
3063 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3064 flags
, block
->fd
, offset
);
3066 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
3067 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3074 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3075 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
3076 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
3079 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
3080 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3084 if (area
!= vaddr
) {
3085 fprintf(stderr
, "Could not remap addr: %lx@%lx\n",
3089 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
3095 #endif /* !_WIN32 */
3097 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3098 With the exception of the softmmu code in this file, this should
3099 only be used for local memory (e.g. video ram) that the device owns,
3100 and knows it isn't going to access beyond the end of the block.
3102 It should not be used for general purpose DMA.
3103 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3105 void *qemu_get_ram_ptr(ram_addr_t addr
)
3109 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3110 if (addr
- block
->offset
< block
->length
) {
            /* Move this entry to the start of the list. */
3112 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
3113 QLIST_REMOVE(block
, next
);
3114 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
3116 if (xen_mapcache_enabled()) {
3117 /* We need to check if the requested address is in the RAM
3118 * because we don't want to map the entire memory in QEMU.
3119 * In that case just map until the end of the page.
3121 if (block
->offset
== 0) {
3122 return qemu_map_cache(addr
, 0, 0);
3123 } else if (block
->host
== NULL
) {
3124 block
->host
= qemu_map_cache(block
->offset
, block
->length
, 1);
3127 return block
->host
+ (addr
- block
->offset
);
3131 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
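/* Hedged usage sketch for the rule stated above: a device may use
 * qemu_get_ram_ptr() on RAM it allocated itself (e.g. video RAM) and should
 * balance it with qemu_put_ram_ptr(); guest-directed DMA should instead go
 * through cpu_physical_memory_rw()/cpu_physical_memory_map().  The offset and
 * length below are illustrative.
 */
#if 0
static void example_clear_vram(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, 0x1000);   /* the device touches only its own block */
    qemu_put_ram_ptr(p);
}
#endif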
3137 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3138 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3140 void *qemu_safe_ram_ptr(ram_addr_t addr
)
3144 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3145 if (addr
- block
->offset
< block
->length
) {
3146 if (xen_mapcache_enabled()) {
3147 /* We need to check if the requested address is in the RAM
3148 * because we don't want to map the entire memory in QEMU.
3149 * In that case just map until the end of the page.
3151 if (block
->offset
== 0) {
3152 return qemu_map_cache(addr
, 0, 0);
3153 } else if (block
->host
== NULL
) {
3154 block
->host
= qemu_map_cache(block
->offset
, block
->length
, 1);
3157 return block
->host
+ (addr
- block
->offset
);
3161 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3167 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3168 * but takes a size argument */
3169 void *qemu_ram_ptr_length(target_phys_addr_t addr
, target_phys_addr_t
*size
)
3171 if (xen_mapcache_enabled())
3172 return qemu_map_cache(addr
, *size
, 1);
3176 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3177 if (addr
- block
->offset
< block
->length
) {
3178 if (addr
- block
->offset
+ *size
> block
->length
)
3179 *size
= block
->length
- addr
+ block
->offset
;
3180 return block
->host
+ (addr
- block
->offset
);
3184 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_mapcache_enabled()) {
        *ram_addr = qemu_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }

    return ram_addr;
}
3234 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
3236 #ifdef DEBUG_UNASSIGNED
3237 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3239 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3240 do_unassigned_access(addr
, 0, 0, 0, 1);
3245 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
3247 #ifdef DEBUG_UNASSIGNED
3248 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3250 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3251 do_unassigned_access(addr
, 0, 0, 0, 2);
3256 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
3258 #ifdef DEBUG_UNASSIGNED
3259 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3261 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3262 do_unassigned_access(addr
, 0, 0, 0, 4);
3267 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3269 #ifdef DEBUG_UNASSIGNED
3270 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3272 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3273 do_unassigned_access(addr
, 1, 0, 0, 1);
3277 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3279 #ifdef DEBUG_UNASSIGNED
3280 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3282 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3283 do_unassigned_access(addr
, 1, 0, 0, 2);
3287 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3289 #ifdef DEBUG_UNASSIGNED
3290 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3292 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3293 do_unassigned_access(addr
, 1, 0, 0, 4);
3297 static CPUReadMemoryFunc
* const unassigned_mem_read
[3] = {
3298 unassigned_mem_readb
,
3299 unassigned_mem_readw
,
3300 unassigned_mem_readl
,
3303 static CPUWriteMemoryFunc
* const unassigned_mem_write
[3] = {
3304 unassigned_mem_writeb
,
3305 unassigned_mem_writew
,
3306 unassigned_mem_writel
,
3309 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
3313 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3314 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3315 #if !defined(CONFIG_USER_ONLY)
3316 tb_invalidate_phys_page_fast(ram_addr
, 1);
3317 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3320 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3321 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3322 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3323 /* we remove the notdirty callback only if the code has been
3325 if (dirty_flags
== 0xff)
3326 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3329 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
3333 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3334 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3335 #if !defined(CONFIG_USER_ONLY)
3336 tb_invalidate_phys_page_fast(ram_addr
, 2);
3337 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3340 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3341 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3342 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3343 /* we remove the notdirty callback only if the code has been
3345 if (dirty_flags
== 0xff)
3346 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3349 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
3353 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3354 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3355 #if !defined(CONFIG_USER_ONLY)
3356 tb_invalidate_phys_page_fast(ram_addr
, 4);
3357 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3360 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3361 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3362 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3363 /* we remove the notdirty callback only if the code has been
3365 if (dirty_flags
== 0xff)
3366 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3369 static CPUReadMemoryFunc
* const error_mem_read
[3] = {
3370 NULL
, /* never used */
3371 NULL
, /* never used */
3372 NULL
, /* never used */
3375 static CPUWriteMemoryFunc
* const notdirty_mem_write
[3] = {
3376 notdirty_mem_writeb
,
3377 notdirty_mem_writew
,
3378 notdirty_mem_writel
,
3381 /* Generate a debug exception if a watchpoint has been hit. */
3382 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3384 CPUState
*env
= cpu_single_env
;
3385 target_ulong pc
, cs_base
;
3386 TranslationBlock
*tb
;
3391 if (env
->watchpoint_hit
) {
3392 /* We re-entered the check after replacing the TB. Now raise
3393 * the debug interrupt so that is will trigger after the
3394 * current instruction. */
3395 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3398 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3399 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3400 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3401 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3402 wp
->flags
|= BP_WATCHPOINT_HIT
;
3403 if (!env
->watchpoint_hit
) {
3404 env
->watchpoint_hit
= wp
;
3405 tb
= tb_find_pc(env
->mem_io_pc
);
3407 cpu_abort(env
, "check_watchpoint: could not find TB for "
3408 "pc=%p", (void *)env
->mem_io_pc
);
3410 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
3411 tb_phys_invalidate(tb
, -1);
3412 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3413 env
->exception_index
= EXCP_DEBUG
;
3415 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3416 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3418 cpu_resume_from_signal(env
, NULL
);
3421 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3426 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3427 so these check for a hit then pass through to the normal out-of-line
3429 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
3431 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
3432 return ldub_phys(addr
);
3435 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
3437 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
3438 return lduw_phys(addr
);
3441 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
3443 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
3444 return ldl_phys(addr
);
3447 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3450 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
3451 stb_phys(addr
, val
);
3454 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
3457 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
3458 stw_phys(addr
, val
);
3461 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
3464 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
3465 stl_phys(addr
, val
);
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
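/* Hedged usage sketch: watchpoints are armed with cpu_watchpoint_insert();
 * tlb_set_page() then routes accesses to such pages through io_mem_watch and
 * the handlers above.  The CPU, address, and length below are illustrative.
 */
#if 0
static void example_arm_write_watchpoint(CPUState *env1, target_ulong addr)
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env1, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif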
3480 static inline uint32_t subpage_readlen (subpage_t
*mmio
,
3481 target_phys_addr_t addr
,
3484 unsigned int idx
= SUBPAGE_IDX(addr
);
3485 #if defined(DEBUG_SUBPAGE)
3486 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3487 mmio
, len
, addr
, idx
);
3490 addr
+= mmio
->region_offset
[idx
];
3491 idx
= mmio
->sub_io_index
[idx
];
3492 return io_mem_read
[idx
][len
](io_mem_opaque
[idx
], addr
);
3495 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
3496 uint32_t value
, unsigned int len
)
3498 unsigned int idx
= SUBPAGE_IDX(addr
);
3499 #if defined(DEBUG_SUBPAGE)
3500 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n",
3501 __func__
, mmio
, len
, addr
, idx
, value
);
3504 addr
+= mmio
->region_offset
[idx
];
3505 idx
= mmio
->sub_io_index
[idx
];
3506 io_mem_write
[idx
][len
](io_mem_opaque
[idx
], addr
, value
);
3509 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
3511 return subpage_readlen(opaque
, addr
, 0);
3514 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
3517 subpage_writelen(opaque
, addr
, value
, 0);
3520 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
3522 return subpage_readlen(opaque
, addr
, 1);
3525 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
3528 subpage_writelen(opaque
, addr
, value
, 1);
3531 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
3533 return subpage_readlen(opaque
, addr
, 2);
3536 static void subpage_writel (void *opaque
, target_phys_addr_t addr
,
3539 subpage_writelen(opaque
, addr
, value
, 2);
3542 static CPUReadMemoryFunc
* const subpage_read
[] = {
3548 static CPUWriteMemoryFunc
* const subpage_write
[] = {
3554 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3555 ram_addr_t memory
, ram_addr_t region_offset
)
3559 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3561 idx
= SUBPAGE_IDX(start
);
3562 eidx
= SUBPAGE_IDX(end
);
3563 #if defined(DEBUG_SUBPAGE)
3564 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3565 mmio
, start
, end
, idx
, eidx
, memory
);
3567 if ((memory
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
3568 memory
= IO_MEM_UNASSIGNED
;
3569 memory
= (memory
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3570 for (; idx
<= eidx
; idx
++) {
3571 mmio
->sub_io_index
[idx
] = memory
;
3572 mmio
->region_offset
[idx
] = region_offset
;
3578 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3579 ram_addr_t orig_memory
,
3580 ram_addr_t region_offset
)
3585 mmio
= qemu_mallocz(sizeof(subpage_t
));
3588 subpage_memory
= cpu_register_io_memory(subpage_read
, subpage_write
, mmio
,
3589 DEVICE_NATIVE_ENDIAN
);
3590 #if defined(DEBUG_SUBPAGE)
3591 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3592 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3594 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3595 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
3614 * Usually, devices operate in little endian mode. There are devices out
3615 * there that operate in big endian too. Each device gets byte swapped
3616 * mmio if plugged onto a CPU that does the other endianness.
3626 typedef struct SwapEndianContainer
{
3627 CPUReadMemoryFunc
*read
[3];
3628 CPUWriteMemoryFunc
*write
[3];
3630 } SwapEndianContainer
;
3632 static uint32_t swapendian_mem_readb (void *opaque
, target_phys_addr_t addr
)
3635 SwapEndianContainer
*c
= opaque
;
3636 val
= c
->read
[0](c
->opaque
, addr
);
3640 static uint32_t swapendian_mem_readw(void *opaque
, target_phys_addr_t addr
)
3643 SwapEndianContainer
*c
= opaque
;
3644 val
= bswap16(c
->read
[1](c
->opaque
, addr
));
3648 static uint32_t swapendian_mem_readl(void *opaque
, target_phys_addr_t addr
)
3651 SwapEndianContainer
*c
= opaque
;
3652 val
= bswap32(c
->read
[2](c
->opaque
, addr
));
3656 static CPUReadMemoryFunc
* const swapendian_readfn
[3]={
3657 swapendian_mem_readb
,
3658 swapendian_mem_readw
,
3659 swapendian_mem_readl
3662 static void swapendian_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3665 SwapEndianContainer
*c
= opaque
;
3666 c
->write
[0](c
->opaque
, addr
, val
);
3669 static void swapendian_mem_writew(void *opaque
, target_phys_addr_t addr
,
3672 SwapEndianContainer
*c
= opaque
;
3673 c
->write
[1](c
->opaque
, addr
, bswap16(val
));
3676 static void swapendian_mem_writel(void *opaque
, target_phys_addr_t addr
,
3679 SwapEndianContainer
*c
= opaque
;
3680 c
->write
[2](c
->opaque
, addr
, bswap32(val
));
3683 static CPUWriteMemoryFunc
* const swapendian_writefn
[3]={
3684 swapendian_mem_writeb
,
3685 swapendian_mem_writew
,
3686 swapendian_mem_writel
3689 static void swapendian_init(int io_index
)
3691 SwapEndianContainer
*c
= qemu_malloc(sizeof(SwapEndianContainer
));
3694 /* Swap mmio for big endian targets */
3695 c
->opaque
= io_mem_opaque
[io_index
];
3696 for (i
= 0; i
< 3; i
++) {
3697 c
->read
[i
] = io_mem_read
[io_index
][i
];
3698 c
->write
[i
] = io_mem_write
[io_index
][i
];
3700 io_mem_read
[io_index
][i
] = swapendian_readfn
[i
];
3701 io_mem_write
[io_index
][i
] = swapendian_writefn
[i
];
3703 io_mem_opaque
[io_index
] = c
;
3706 static void swapendian_del(int io_index
)
3708 if (io_mem_read
[io_index
][0] == swapendian_readfn
[0]) {
3709 qemu_free(io_mem_opaque
[io_index
]);
3713 /* mem_read and mem_write are arrays of functions containing the
3714 function to access byte (index 0), word (index 1) and dword (index
3715 2). Functions can be omitted with a NULL function pointer.
3716 If io_index is non zero, the corresponding io zone is
3717 modified. If it is zero, a new io zone is allocated. The return
3718 value can be used with cpu_register_physical_memory(). (-1) is
3719 returned if error. */
3720 static int cpu_register_io_memory_fixed(int io_index
,
3721 CPUReadMemoryFunc
* const *mem_read
,
3722 CPUWriteMemoryFunc
* const *mem_write
,
3723 void *opaque
, enum device_endian endian
)
3727 if (io_index
<= 0) {
3728 io_index
= get_free_io_mem_idx();
3732 io_index
>>= IO_MEM_SHIFT
;
3733 if (io_index
>= IO_MEM_NB_ENTRIES
)
3737 for (i
= 0; i
< 3; ++i
) {
3738 io_mem_read
[io_index
][i
]
3739 = (mem_read
[i
] ? mem_read
[i
] : unassigned_mem_read
[i
]);
3741 for (i
= 0; i
< 3; ++i
) {
3742 io_mem_write
[io_index
][i
]
3743 = (mem_write
[i
] ? mem_write
[i
] : unassigned_mem_write
[i
]);
3745 io_mem_opaque
[io_index
] = opaque
;
3748 case DEVICE_BIG_ENDIAN
:
3749 #ifndef TARGET_WORDS_BIGENDIAN
3750 swapendian_init(io_index
);
3753 case DEVICE_LITTLE_ENDIAN
:
3754 #ifdef TARGET_WORDS_BIGENDIAN
3755 swapendian_init(io_index
);
3758 case DEVICE_NATIVE_ENDIAN
:
3763 return (io_index
<< IO_MEM_SHIFT
);
3766 int cpu_register_io_memory(CPUReadMemoryFunc
* const *mem_read
,
3767 CPUWriteMemoryFunc
* const *mem_write
,
3768 void *opaque
, enum device_endian endian
)
3770 return cpu_register_io_memory_fixed(0, mem_read
, mem_write
, opaque
, endian
);
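/* Hedged usage sketch: a device of this era supplies three read and three
 * write callbacks (byte/word/long; NULL entries fall back to the unassigned
 * handlers) and maps the returned token with cpu_register_physical_memory().
 * The handler bodies, names, and register layout are made up for illustration.
 */
#if 0
static uint32_t example_io_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                      /* device register read */
}

static void example_io_writel(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    /* device register write */
}

static CPUReadMemoryFunc * const example_io_read[3] = {
    NULL, NULL, example_io_readl,
};
static CPUWriteMemoryFunc * const example_io_write[3] = {
    NULL, NULL, example_io_writel,
};

static void example_register_mmio(void *opaque, target_phys_addr_t base)
{
    int idx = cpu_register_io_memory(example_io_read, example_io_write,
                                     opaque, DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, idx);
}
#endif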
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3809 #endif /* !defined(CONFIG_USER_ONLY) */
3811 /* physical memory access (slow version, mainly for debug) */
3812 #if defined(CONFIG_USER_ONLY)
3813 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3814 uint8_t *buf
, int len
, int is_write
)
3821 page
= addr
& TARGET_PAGE_MASK
;
3822 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3825 flags
= page_get_flags(page
);
3826 if (!(flags
& PAGE_VALID
))
3829 if (!(flags
& PAGE_WRITE
))
3831 /* XXX: this code should not depend on lock_user */
3832 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3835 unlock_user(p
, addr
, l
);
3837 if (!(flags
& PAGE_READ
))
3839 /* XXX: this code should not depend on lock_user */
3840 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3843 unlock_user(p
, addr
, 0);
3853 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3854 int len
, int is_write
)
3859 target_phys_addr_t page
;
3864 page
= addr
& TARGET_PAGE_MASK
;
3865 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3868 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3870 pd
= IO_MEM_UNASSIGNED
;
3872 pd
= p
->phys_offset
;
3876 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
3877 target_phys_addr_t addr1
= addr
;
3878 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3880 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3881 /* XXX: could force cpu_single_env to NULL to avoid
3883 if (l
>= 4 && ((addr1
& 3) == 0)) {
3884 /* 32 bit write access */
3886 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr1
, val
);
3888 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3889 /* 16 bit write access */
3891 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr1
, val
);
3894 /* 8 bit write access */
3896 io_mem_write
[io_index
][0](io_mem_opaque
[io_index
], addr1
, val
);
3900 unsigned long addr1
;
3901 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3903 ptr
= qemu_get_ram_ptr(addr1
);
3904 memcpy(ptr
, buf
, l
);
3905 if (!cpu_physical_memory_is_dirty(addr1
)) {
3906 /* invalidate code */
3907 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3909 cpu_physical_memory_set_dirty_flags(
3910 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3912 qemu_put_ram_ptr(ptr
);
3915 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
3916 !(pd
& IO_MEM_ROMD
)) {
3917 target_phys_addr_t addr1
= addr
;
3919 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3921 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
3922 if (l
>= 4 && ((addr1
& 3) == 0)) {
3923 /* 32 bit read access */
3924 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr1
);
3927 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3928 /* 16 bit read access */
3929 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr1
);
3933 /* 8 bit read access */
3934 val
= io_mem_read
[io_index
][0](io_mem_opaque
[io_index
], addr1
);
3940 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
3941 memcpy(buf
, ptr
+ (addr
& ~TARGET_PAGE_MASK
), l
);
3942 qemu_put_ram_ptr(ptr
);
3951 /* used for ROM loading : can write in RAM and ROM */
3952 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3953 const uint8_t *buf
, int len
)
3957 target_phys_addr_t page
;
3962 page
= addr
& TARGET_PAGE_MASK
;
3963 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3966 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3968 pd
= IO_MEM_UNASSIGNED
;
3970 pd
= p
->phys_offset
;
3973 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3974 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3975 !(pd
& IO_MEM_ROMD
)) {
3978 unsigned long addr1
;
3979 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3981 ptr
= qemu_get_ram_ptr(addr1
);
3982 memcpy(ptr
, buf
, l
);
3983 qemu_put_ram_ptr(ptr
);
3993 target_phys_addr_t addr
;
3994 target_phys_addr_t len
;
3997 static BounceBuffer bounce
;
3999 typedef struct MapClient
{
4001 void (*callback
)(void *opaque
);
4002 QLIST_ENTRY(MapClient
) link
;
4005 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
4006 = QLIST_HEAD_INITIALIZER(map_client_list
);
4008 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
4010 MapClient
*client
= qemu_malloc(sizeof(*client
));
4012 client
->opaque
= opaque
;
4013 client
->callback
= callback
;
4014 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
4018 void cpu_unregister_map_client(void *_client
)
4020 MapClient
*client
= (MapClient
*)_client
;
4022 QLIST_REMOVE(client
, link
);
4026 static void cpu_notify_map_clients(void)
4030 while (!QLIST_EMPTY(&map_client_list
)) {
4031 client
= QLIST_FIRST(&map_client_list
);
4032 client
->callback(client
->opaque
);
4033 cpu_unregister_map_client(client
);
4037 /* Map a physical memory region into a host virtual address.
4038 * May map a subset of the requested range, given by and returned in *plen.
4039 * May return NULL if resources needed to perform the mapping are exhausted.
4040 * Use only for reads OR writes - not for read-modify-write operations.
4041 * Use cpu_register_map_client() to know when retrying the map operation is
4042 * likely to succeed.
4044 void *cpu_physical_memory_map(target_phys_addr_t addr
,
4045 target_phys_addr_t
*plen
,
4048 target_phys_addr_t len
= *plen
;
4049 target_phys_addr_t todo
= 0;
4051 target_phys_addr_t page
;
4054 target_phys_addr_t addr1
= addr
;
4057 page
= addr
& TARGET_PAGE_MASK
;
4058 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4061 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
4063 pd
= IO_MEM_UNASSIGNED
;
4065 pd
= p
->phys_offset
;
4068 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4069 if (todo
|| bounce
.buffer
) {
4072 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
4076 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
4080 return bounce
.buffer
;
4088 return qemu_ram_ptr_length(addr1
, plen
);
4091 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4092 * Will also mark the memory as dirty if is_write == 1. access_len gives
4093 * the amount of memory that was actually read or written by the caller.
4095 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
4096 int is_write
, target_phys_addr_t access_len
)
4098 if (buffer
!= bounce
.buffer
) {
4100 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
4101 while (access_len
) {
4103 l
= TARGET_PAGE_SIZE
;
4106 if (!cpu_physical_memory_is_dirty(addr1
)) {
4107 /* invalidate code */
4108 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
4110 cpu_physical_memory_set_dirty_flags(
4111 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4117 if (xen_mapcache_enabled()) {
4118 qemu_invalidate_entry(buffer
);
4123 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
4125 qemu_vfree(bounce
.buffer
);
4126 bounce
.buffer
= NULL
;
4127 cpu_notify_map_clients();
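/* Hedged usage sketch of the map/unmap contract described above: map a guest
 * physical range for a one-direction transfer, honour the possibly shortened
 * length returned in *plen, and always unmap with the number of bytes actually
 * touched.  Retry via cpu_register_map_client() is omitted; the function is
 * illustrative.
 */
#if 0
static void example_dma_read(target_phys_addr_t guest_addr,
                             target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 0 /* read */);

    if (!host) {
        return;                 /* resources exhausted, retry later */
    }
    /* ... consume plen bytes at host ... */
    cpu_physical_memory_unmap(host, plen, 0 /* is_write */, plen);
}
#endif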
4130 /* warning: addr must be aligned */
4131 uint32_t ldl_phys(target_phys_addr_t addr
)
4139 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4141 pd
= IO_MEM_UNASSIGNED
;
4143 pd
= p
->phys_offset
;
4146 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4147 !(pd
& IO_MEM_ROMD
)) {
4149 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4151 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4152 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
4155 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4156 (addr
& ~TARGET_PAGE_MASK
);
4162 /* warning: addr must be aligned */
4163 uint64_t ldq_phys(target_phys_addr_t addr
)
4171 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4173 pd
= IO_MEM_UNASSIGNED
;
4175 pd
= p
->phys_offset
;
4178 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4179 !(pd
& IO_MEM_ROMD
)) {
4181 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4183 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4184 #ifdef TARGET_WORDS_BIGENDIAN
4185 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
4186 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
4188 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
4189 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
4193 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4194 (addr
& ~TARGET_PAGE_MASK
);
4201 uint32_t ldub_phys(target_phys_addr_t addr
)
4204 cpu_physical_memory_read(addr
, &val
, 1);
4208 /* warning: addr must be aligned */
4209 uint32_t lduw_phys(target_phys_addr_t addr
)
4217 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4219 pd
= IO_MEM_UNASSIGNED
;
4221 pd
= p
->phys_offset
;
4224 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4225 !(pd
& IO_MEM_ROMD
)) {
4227 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4229 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4230 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
4233 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4234 (addr
& ~TARGET_PAGE_MASK
);
4240 /* warning: addr must be aligned. The ram page is not masked as dirty
4241 and the code inside is not invalidated. It is useful if the dirty
4242 bits are used to track modified PTEs */
4243 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4250 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4252 pd
= IO_MEM_UNASSIGNED
;
4254 pd
= p
->phys_offset
;
4257 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4258 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4260 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4261 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4263 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4264 ptr
= qemu_get_ram_ptr(addr1
);
4267 if (unlikely(in_migration
)) {
4268 if (!cpu_physical_memory_is_dirty(addr1
)) {
4269 /* invalidate code */
4270 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4272 cpu_physical_memory_set_dirty_flags(
4273 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4279 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4286 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4288 pd
= IO_MEM_UNASSIGNED
;
4290 pd
= p
->phys_offset
;
4293 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4294 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4296 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4297 #ifdef TARGET_WORDS_BIGENDIAN
4298 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
4299 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
4301 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4302 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
4305 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4306 (addr
& ~TARGET_PAGE_MASK
);
4311 /* warning: addr must be aligned */
4312 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4319 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4321 pd
= IO_MEM_UNASSIGNED
;
4323 pd
= p
->phys_offset
;
4326 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4327 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4329 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4330 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4332 unsigned long addr1
;
4333 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4335 ptr
= qemu_get_ram_ptr(addr1
);
4337 if (!cpu_physical_memory_is_dirty(addr1
)) {
4338 /* invalidate code */
4339 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4341 cpu_physical_memory_set_dirty_flags(addr1
,
4342 (0xff & ~CODE_DIRTY_FLAG
));
4348 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4351 cpu_physical_memory_write(addr
, &v
, 1);
4354 /* warning: addr must be aligned */
4355 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4362 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4364 pd
= IO_MEM_UNASSIGNED
;
4366 pd
= p
->phys_offset
;
4369 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4370 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4372 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4373 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
4375 unsigned long addr1
;
4376 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4378 ptr
= qemu_get_ram_ptr(addr1
);
4380 if (!cpu_physical_memory_is_dirty(addr1
)) {
4381 /* invalidate code */
4382 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4384 cpu_physical_memory_set_dirty_flags(addr1
,
4385 (0xff & ~CODE_DIRTY_FLAG
));
4391 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4394 cpu_physical_memory_write(addr
, &val
, 8);
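/* Hedged usage sketch: the ld*_phys/st*_phys helpers above give device and
 * platform code word-sized access to guest physical memory with dirty
 * tracking handled for them.  The descriptor layout and offsets below are
 * made up for illustration.
 */
#if 0
static void example_complete_descriptor(target_phys_addr_t desc_pa)
{
    uint32_t status = ldl_phys(desc_pa + 4);   /* read status word */

    stl_phys(desc_pa + 4, status | 1);         /* set the "done" bit */
    stq_phys(desc_pa + 8, 0);                  /* clear the buffer pointer */
}
#endif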
4397 /* virtual memory access for debug (includes writing to ROM) */
4398 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4399 uint8_t *buf
, int len
, int is_write
)
4402 target_phys_addr_t phys_addr
;
4406 page
= addr
& TARGET_PAGE_MASK
;
4407 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4408 /* if no physical page mapped, return an error */
4409 if (phys_addr
== -1)
4411 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4414 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4416 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4418 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4427 /* in deterministic execution mode, instructions doing device I/Os
4428 must be at the end of the TB */
4429 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4431 TranslationBlock
*tb
;
4433 target_ulong pc
, cs_base
;
4436 tb
= tb_find_pc((unsigned long)retaddr
);
4438 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4441 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4442 cpu_restore_state(tb
, env
, (unsigned long)retaddr
);
4443 /* Calculate how many instructions had been executed before the fault
4445 n
= n
- env
->icount_decr
.u16
.low
;
4446 /* Generate a new TB ending on the I/O insn. */
4448 /* On MIPS and SH, delay slot instructions can only be restarted if
4449 they were already the first instruction in the TB. If this is not
4450 the first instruction in a TB then re-execute the preceding
4452 #if defined(TARGET_MIPS)
4453 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4454 env
->active_tc
.PC
-= 4;
4455 env
->icount_decr
.u16
.low
++;
4456 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4458 #elif defined(TARGET_SH4)
4459 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4462 env
->icount_decr
.u16
.low
++;
4463 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4466 /* This should never happen. */
4467 if (n
> CF_COUNT_MASK
)
4468 cpu_abort(env
, "TB too big during recompile");
4470 cflags
= n
| CF_LAST_IO
;
4472 cs_base
= tb
->cs_base
;
4474 tb_phys_invalidate(tb
, -1);
4475 /* FIXME: In theory this could raise an exception. In practice
4476 we have already translated the block once so it's probably ok. */
4477 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4478 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4479 the first in the TB) then we end up generating a whole new TB and
4480 repeating the fault, which is horribly inefficient.
4481 Better would be to execute just this insn uncached, or generate a
4483 cpu_resume_from_signal(env
, NULL
);
4486 #if !defined(CONFIG_USER_ONLY)
4488 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4490 int i
, target_code_size
, max_target_code_size
;
4491 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4492 TranslationBlock
*tb
;
4494 target_code_size
= 0;
4495 max_target_code_size
= 0;
4497 direct_jmp_count
= 0;
4498 direct_jmp2_count
= 0;
4499 for(i
= 0; i
< nb_tbs
; i
++) {
4501 target_code_size
+= tb
->size
;
4502 if (tb
->size
> max_target_code_size
)
4503 max_target_code_size
= tb
->size
;
4504 if (tb
->page_addr
[1] != -1)
4506 if (tb
->tb_next_offset
[0] != 0xffff) {
4508 if (tb
->tb_next_offset
[1] != 0xffff) {
4509 direct_jmp2_count
++;
4513 /* XXX: avoid using doubles ? */
4514 cpu_fprintf(f
, "Translation buffer state:\n");
4515 cpu_fprintf(f
, "gen code size %td/%ld\n",
4516 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4517 cpu_fprintf(f
, "TB count %d/%d\n",
4518 nb_tbs
, code_gen_max_blocks
);
4519 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4520 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4521 max_target_code_size
);
4522 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4523 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4524 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4525 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4527 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4528 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4530 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4532 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4533 cpu_fprintf(f
, "\nStatistics:\n");
4534 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4535 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4536 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4537 tcg_dump_info(f
, cpu_fprintf
);
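/* Usage note (hedged): this dump backs the monitor command "info jit", so the
 * translation-buffer and TB statistics above can be inspected at runtime from
 * the QEMU monitor.
 */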
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif