/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE
#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */

/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif
/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
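/* Illustrative sketch of how a page index is split across the levels of the
   map above (it mirrors the lookup in page_find_alloc() below).  The concrete
   sizes depend on the target configuration; nothing here is used by the
   build, it only documents the walk. */
#if 0
static PageDesc *example_page_walk(tb_page_addr_t index)
{
    /* the top V_L1_BITS of the page number select a statically
       allocated L1 slot ... */
    void **lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
    int i;

    /* ... and each remaining group of L2_BITS selects one slot in a
       dynamically allocated intermediate table, down to the PageDesc
       array at the bottom. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            return NULL;
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }
    return ((PageDesc *)*lp) + (index & (L2_SIZE - 1));
}
#endif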
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
#ifdef HAVE_KINFO_GETVMMAP
    struct kinfo_vmentry *freep;
    int i, cnt;

    freep = kinfo_getvmmap(getpid(), &cnt);
    for (i = 0; i < cnt; i++) {
        unsigned long startaddr, endaddr;

        startaddr = freep[i].kve_start;
        endaddr = freep[i].kve_end;
        if (h2g_valid(startaddr)) {
            startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

            if (h2g_valid(endaddr)) {
                endaddr = h2g(endaddr);
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
            } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                endaddr = ~0ul;
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
            }
        }
    }
#else
    last_brk = (unsigned long)sbrk(0);

    f = fopen("/compat/linux/proc/self/maps", "r");
    if (f) {
        do {
            unsigned long startaddr, endaddr;
            int n;

            n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

            if (n == 2 && h2g_valid(startaddr)) {
                startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                if (h2g_valid(endaddr)) {
                    endaddr = h2g(endaddr);
                }
                page_set_flags(startaddr, endaddr, PAGE_RESERVED);
            }
        } while (!feof(f));
        fclose(f);
    }
#endif
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif
    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        ALLOC(p, sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    ALLOC(pd, sizeof(PageDesc) * L2_SIZE);

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
    for (i = 0; i < L2_SIZE; i++) {
        pd[i].phys_offset = IO_MEM_UNASSIGNED;
        pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    start = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024))
        code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
    /* Map the buffer below 32M, so we can use direct calls and branches */
    start = (void *) 0x01000000UL;
    if (code_gen_buffer_size > 16 * 1024 * 1024)
        code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
    /* Map the buffer so that we can use direct calls and branches.  */
    /* We have a +- 4GB range on the branches; leave some slop.  */
    if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
        code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
    }
    start = (void *)0x90000000UL;
#endif
    code_gen_buffer = mmap(start, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
    /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
     * 0x40000000 is free */
    addr = (void *)0x40000000;
    /* Cannot map more than that */
    if (code_gen_buffer_size > (800 * 1024 * 1024))
        code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
    // Map the buffer below 2G, so we can use direct calls and branches
    addr = (void *) 0x60000000UL;
    if (code_gen_buffer_size > (512 * 1024 * 1024)) {
        code_gen_buffer_size = (512 * 1024 * 1024);
    }
#endif
    code_gen_buffer = mmap(addr, code_gen_buffer_size,
                           PROT_WRITE | PROT_READ | PROT_EXEC,
                           flags, -1, 0);
    if (code_gen_buffer == MAP_FAILED) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
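/* Sizing note (illustrative, not normative): code_gen_buffer_max_size leaves
   room for one worst-case opcode sequence, so tb_alloc() can check for a full
   buffer *before* a block is translated.  As an example, with the 32 MB
   default buffer and an average block size of 128 bytes (the real
   CODE_GEN_AVG_BLOCK_SIZE is target dependent) roughly 256k TranslationBlock
   descriptors are reserved up front by the qemu_malloc() above. */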
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
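/* Note on the encoding used by the jump lists (explanatory, derived from the
   list handling in this file): jmp_first/jmp_next hold a TranslationBlock
   pointer with the jump slot number OR-ed into its two low bits, and the
   value (tb | 2) marks the head of the circular list.  A sketch of how an
   entry would be decoded: */
#if 0
    TranslationBlock *entry = tb->jmp_next[n];
    int slot = (long)entry & 3;                          /* 0, 1 or 2 (head) */
    TranslationBlock *next = (TranslationBlock *)((long)entry & ~3);
#endif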
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
*tb_gen_code(CPUState
*env
,
968 target_ulong pc
, target_ulong cs_base
,
969 int flags
, int cflags
)
971 TranslationBlock
*tb
;
973 tb_page_addr_t phys_pc
, phys_page2
;
974 target_ulong virt_page2
;
977 phys_pc
= get_page_addr_code(env
, pc
);
980 /* flush must be done */
982 /* cannot fail at this point */
984 /* Don't forget to invalidate previous TB info. */
985 tb_invalidated_flag
= 1;
987 tc_ptr
= code_gen_ptr
;
989 tb
->cs_base
= cs_base
;
992 cpu_gen_code(env
, tb
, &code_gen_size
);
993 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
995 /* check next page if needed */
996 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
998 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
999 phys_page2
= get_page_addr_code(env
, virt_page2
);
1001 tb_link_page(tb
, phys_pc
, phys_page2
);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;

#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
*tb
);
1331 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1333 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1336 tb1
= tb
->jmp_next
[n
];
1338 /* find head of list */
1341 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1344 tb1
= tb1
->jmp_next
[n1
];
1346 /* we are now sure now that tb jumps to tb1 */
1349 /* remove tb from the jmp_first list */
1350 ptb
= &tb_next
->jmp_first
;
1354 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1355 if (n1
== n
&& tb1
== tb
)
1357 ptb
= &tb1
->jmp_next
[n1
];
1359 *ptb
= tb
->jmp_next
[n
];
1360 tb
->jmp_next
[n
] = NULL
;
1362 /* suppress the jump to next tb in generated code */
1363 tb_reset_jump(tb
, n
);
1365 /* suppress jumps in the tb on which we could have jumped */
1366 tb_reset_jump_recursive(tb_next
);
1370 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1372 tb_reset_jump_recursive2(tb
, 0);
1373 tb_reset_jump_recursive2(tb
, 1);
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}
/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}
/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
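/* Usage sketch (illustrative only; error handling omitted): how a debug stub
   could install and later drop a 4-byte write watchpoint, given some
   CPUState *env and guest address addr. */
#if 0
    CPUWatchpoint *wp;
    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... run; a hit is reported with wp->flags & BP_WATCHPOINT_HIT ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
#endif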
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i);
    }
}
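/* Illustrative walk-through of the address rebuilding described above
   (the exact bit widths are target dependent): at every intermediate level
   the accumulated 'addr' is shifted left by L2_BITS and the current index
   OR-ed in, so by the time a leaf PhysPageDesc is reached, 'addr' already
   holds all the upper bits of the guest physical page and only needs
   '| i << TARGET_PAGE_BITS' to form the page address passed to the client. */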
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
*env
, target_ulong addr
)
1981 #if defined(DEBUG_TLB)
1982 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1984 /* Check if we need to flush due to large pages. */
1985 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1986 #if defined(DEBUG_TLB)
1987 printf("tlb_flush_page: forced full flush ("
1988 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1989 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1994 /* must reset current TB so that interrupts cannot modify the
1995 links while we are modifying them */
1996 env
->current_tb
= NULL
;
1998 addr
&= TARGET_PAGE_MASK
;
1999 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2000 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2001 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
2003 tlb_flush_jmp_cache(env
, addr
);
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
)
2074 in_migration
= enable
;
2075 ret
= cpu_notify_migration_log(!!enable
);
2079 int cpu_physical_memory_get_dirty_tracking(void)
2081 return in_migration
;
2084 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2085 target_phys_addr_t end_addr
)
2089 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2093 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2096 CPUPhysMemoryClient
*client
;
2097 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2098 if (client
->log_start
) {
2099 int r
= client
->log_start(client
, start_addr
, size
);
2108 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2111 CPUPhysMemoryClient
*client
;
2112 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2113 if (client
->log_stop
) {
2114 int r
= client
->log_stop(client
, start_addr
, size
);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No previous large page.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
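/* Worked example (illustrative numbers only): if a 2 MB page at 0x00200000
   is entered first, tlb_flush_addr/tlb_flush_mask cover 0x00200000/0xffe00000.
   If a second large page at 0x00c00000 is then added, the loop above widens
   the mask until one power-of-two region covers both, here
   0x00000000/0xff000000, at the price of more full flushes being triggered
   from tlb_flush_page(). */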
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
, target_ulong len
, int flags
)
2472 /* This function should never be called with addresses outside the
2473 guest address space. If this assert fires, it probably indicates
2474 a missing call to h2g_valid. */
2475 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2476 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2482 if (start
+ len
- 1 < start
) {
2483 /* We've wrapped around. */
2487 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2488 start
= start
& TARGET_PAGE_MASK
;
2490 for (addr
= start
, len
= end
- start
;
2492 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2493 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2496 if( !(p
->flags
& PAGE_VALID
) )
2499 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2501 if (flags
& PAGE_WRITE
) {
2502 if (!(p
->flags
& PAGE_WRITE_ORG
))
2504 /* unprotect the page if it was put read-only because it
2505 contains translated code */
2506 if (!(p
->flags
& PAGE_WRITE
)) {
2507 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
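/* Illustrative sketch (not part of the original file): round-tripping between
   a ram_addr_t and a host pointer with the two helpers above.  The function
   name and parameter are invented for the example. */
#if 0
static void example_round_trip(ram_addr_t some_ram_offset)
{
    void *host = qemu_get_ram_ptr(some_ram_offset);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == some_ram_offset);
    }
}
#endif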
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
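/* Illustrative sketch (not part of the original file): a minimal device could
   register its byte/word/long callbacks and then map the returned io index
   into the guest physical address space with cpu_register_physical_memory().
   All names and the base address below are invented for the example. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                         /* reads as zero */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* ignore writes */
}

static CPUReadMemoryFunc * const mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[3] = { NULL, NULL, mydev_writel };

static void mydev_map(void *opaque)
{
    /* omitted byte/word callbacks fall back to the unassigned handlers */
    int io = cpu_register_io_memory(mydev_read, mydev_write, opaque,
                                    DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0xf0000000, TARGET_PAGE_SIZE, io);
}
#endif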
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
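/* Illustrative sketch (not part of the original file): the usual map/unmap
   pattern for a DMA-style read of guest memory.  When the region cannot be
   mapped directly (e.g. the bounce buffer is busy), a real device would
   register a map client and retry from its callback; the function name below
   is invented. */
#if 0
static void example_dma_read(target_phys_addr_t addr, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *buf = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

    if (!buf) {
        /* resources exhausted: retry later via cpu_register_map_client() */
        return;
    }
    /* ... consume plen bytes at buf (may be less than size) ... */
    cpu_physical_memory_unmap(buf, plen, 0 /* is_write */, plen);
}
#endif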
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
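/* Illustrative sketch (not part of the original file): how a target MMU
   helper might use these accessors.  A page table entry is read with
   ldl_phys() and written back with stl_phys_notdirty(), so that updating an
   accessed/dirty bit does not itself mark the guest page dirty.  The function
   name and the bit position below are invented. */
#if 0
static void example_set_accessed_bit(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {              /* hypothetical "accessed" bit */
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}
#endif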
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"