/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"

#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
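
/* Illustrative split of a page index across the levels (the exact
   numbers depend on the target configuration; this example assumes
   TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 47-bit user-mode virtual
   address space): 47 - 12 = 35 index bits, 35 % 10 = 5 bits for the
   L1 table (V_L1_BITS), V_L1_SHIFT = 47 - 12 - 5 = 30, and three
   10-bit levels below the L1 table. */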
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
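
/* Note: the lookup above walks the radix tree one level per loop
   iteration, consuming L2_BITS of the page index each time; with
   alloc == 0 it gives up as soon as an intermediate table is missing
   instead of allocating it. */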
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
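
/* As with page_find(), a NULL result means no PhysPageDesc has been
   allocated for this physical page yet; callers treat that case as
   IO_MEM_UNASSIGNED. */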
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
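
/* Note: code_gen_buffer_max_size leaves TCG_MAX_OP_SIZE * OPC_MAX_SIZE
   bytes of slack below the end of the buffer, so a translation that
   starts just under the flush threshold still has room for one
   worst-case translation block. */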
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
*tb
)
669 /* In practice this is mostly used for single use temporary TB
670 Ignore the hard cases and just back up if this TB happens to
671 be the last one generated. */
672 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
673 code_gen_ptr
= tb
->tc_ptr
;
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
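
/* Note: both the page_next[] links and the jmp_next[]/jmp_first lists
   store a TB pointer with its low two bits used as a tag (the index of
   the jump slot, or 2 for the list head), which is why the walkers
   above mask with ~3 before dereferencing. */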
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
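
/* Illustrative example (values are made up): a TB starting at page
   offset 0x14 with size 3 sets bits 0x14..0x16 of code_bitmap, one bit
   per byte of the page, so a later short write to that range can be
   detected by tb_invalidate_phys_page_fast() without scanning the TB
   list. */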
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
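
/* Note: a TB whose last byte falls on the following page gets both
   pages recorded (phys_page2 above), so a write to either page will
   invalidate it. */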
1002 /* invalidate all TBs which intersect with the target physical page
1003 starting in range [start;end[. NOTE: start and end must refer to
1004 the same physical page. 'is_cpu_write_access' should be true if called
1005 from a real cpu write access: the virtual CPU will exit the current
1006 TB if code is modified inside this TB. */
1007 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1008 int is_cpu_write_access
)
1010 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1011 CPUState
*env
= cpu_single_env
;
1012 tb_page_addr_t tb_start
, tb_end
;
1015 #ifdef TARGET_HAS_PRECISE_SMC
1016 int current_tb_not_found
= is_cpu_write_access
;
1017 TranslationBlock
*current_tb
= NULL
;
1018 int current_tb_modified
= 0;
1019 target_ulong current_pc
= 0;
1020 target_ulong current_cs_base
= 0;
1021 int current_flags
= 0;
1022 #endif /* TARGET_HAS_PRECISE_SMC */
1024 p
= page_find(start
>> TARGET_PAGE_BITS
);
1027 if (!p
->code_bitmap
&&
1028 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1029 is_cpu_write_access
) {
1030 /* build code bitmap */
1031 build_page_bitmap(p
);
1034 /* we remove all the TBs in the range [start, end[ */
1035 /* XXX: see if in some cases it could be faster to invalidate all the code */
1037 while (tb
!= NULL
) {
1039 tb
= (TranslationBlock
*)((long)tb
& ~3);
1040 tb_next
= tb
->page_next
[n
];
1041 /* NOTE: this is subtle as a TB may span two physical pages */
1043 /* NOTE: tb_end may be after the end of the page, but
1044 it is not a problem */
1045 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1046 tb_end
= tb_start
+ tb
->size
;
1048 tb_start
= tb
->page_addr
[1];
1049 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1051 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1052 #ifdef TARGET_HAS_PRECISE_SMC
1053 if (current_tb_not_found
) {
1054 current_tb_not_found
= 0;
1056 if (env
->mem_io_pc
) {
1057 /* now we have a real cpu fault */
1058 current_tb
= tb_find_pc(env
->mem_io_pc
);
1061 if (current_tb
== tb
&&
1062 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1063 /* If we are modifying the current TB, we must stop
1064 its execution. We could be more precise by checking
1065 that the modification is after the current PC, but it
1066 would require a specialized function to partially
1067 restore the CPU state */
1069 current_tb_modified
= 1;
1070 cpu_restore_state(current_tb
, env
,
1071 env
->mem_io_pc
, NULL
);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
1075 #endif /* TARGET_HAS_PRECISE_SMC */
1076 /* we need to do that to handle the case where a signal
1077 occurs while doing tb_phys_invalidate() */
1080 saved_tb
= env
->current_tb
;
1081 env
->current_tb
= NULL
;
1083 tb_phys_invalidate(tb
, -1);
1085 env
->current_tb
= saved_tb
;
1086 if (env
->interrupt_request
&& env
->current_tb
)
1087 cpu_interrupt(env
, env
->interrupt_request
);
1092 #if !defined(CONFIG_USER_ONLY)
1093 /* if no code remaining, no need to continue to use slow writes */
1095 invalidate_page_bitmap(p
);
1096 if (is_cpu_write_access
) {
1097 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1101 #ifdef TARGET_HAS_PRECISE_SMC
1102 if (current_tb_modified
) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
1106 env
->current_tb
= NULL
;
1107 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1108 cpu_resume_from_signal(env
, NULL
);
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
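
/* Illustrative example of the bitmap test (values are made up): for
   start = 0x124 and len = 4, offset is 0x124, b holds the code_bitmap
   bits for page offsets 0x124.. shifted into the low bits, and
   (b & 0xf) != 0 exactly when some TB covers one of the four written
   bytes. */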
1140 #if !defined(CONFIG_SOFTMMU)
1141 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1142 unsigned long pc
, void *puc
)
1144 TranslationBlock
*tb
;
1147 #ifdef TARGET_HAS_PRECISE_SMC
1148 TranslationBlock
*current_tb
= NULL
;
1149 CPUState
*env
= cpu_single_env
;
1150 int current_tb_modified
= 0;
1151 target_ulong current_pc
= 0;
1152 target_ulong current_cs_base
= 0;
1153 int current_flags
= 0;
1156 addr
&= TARGET_PAGE_MASK
;
1157 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1161 #ifdef TARGET_HAS_PRECISE_SMC
1162 if (tb
&& pc
!= 0) {
1163 current_tb
= tb_find_pc(pc
);
1166 while (tb
!= NULL
) {
1168 tb
= (TranslationBlock
*)((long)tb
& ~3);
1169 #ifdef TARGET_HAS_PRECISE_SMC
1170 if (current_tb
== tb
&&
1171 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1172 /* If we are modifying the current TB, we must stop
1173 its execution. We could be more precise by checking
1174 that the modification is after the current PC, but it
1175 would require a specialized function to partially
1176 restore the CPU state */
1178 current_tb_modified
= 1;
1179 cpu_restore_state(current_tb
, env
, pc
, puc
);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
1183 #endif /* TARGET_HAS_PRECISE_SMC */
1184 tb_phys_invalidate(tb
, addr
);
1185 tb
= tb
->page_next
[n
];
1188 #ifdef TARGET_HAS_PRECISE_SMC
1189 if (current_tb_modified
) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
1193 env
->current_tb
= NULL
;
1194 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1195 cpu_resume_from_signal(env
, puc
);
1201 /* add the tb in the target page and protect it if necessary */
1202 static inline void tb_alloc_page(TranslationBlock
*tb
,
1203 unsigned int n
, tb_page_addr_t page_addr
)
1206 TranslationBlock
*last_first_tb
;
1208 tb
->page_addr
[n
] = page_addr
;
1209 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1210 tb
->page_next
[n
] = p
->first_tb
;
1211 last_first_tb
= p
->first_tb
;
1212 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1213 invalidate_page_bitmap(p
);
1215 #if defined(TARGET_HAS_SMC) || 1
1217 #if defined(CONFIG_USER_ONLY)
1218 if (p
->flags
& PAGE_WRITE
) {
1223 /* force the host page as non writable (writes will have a
1224 page fault + mprotect overhead) */
1225 page_addr
&= qemu_host_page_mask
;
1227 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1228 addr
+= TARGET_PAGE_SIZE
) {
1230 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1234 p2
->flags
&= ~PAGE_WRITE
;
1236 mprotect(g2h(page_addr
), qemu_host_page_size
,
1237 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1238 #ifdef DEBUG_TB_INVALIDATE
1239 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1244 /* if some code is already present, then the pages are already
1245 protected. So we handle the case where only the first TB is
1246 allocated in a physical page */
1247 if (!last_first_tb
) {
1248 tlb_protect_code(page_addr
);
1252 #endif /* TARGET_HAS_SMC */
1255 /* add a new TB and link it to the physical page tables. phys_page2 is
1256 (-1) to indicate that only one page contains the TB. */
1257 void tb_link_page(TranslationBlock
*tb
,
1258 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1261 TranslationBlock
**ptb
;
1263 /* Grab the mmap lock to stop another thread invalidating this TB
1264 before we are done. */
1266 /* add in the physical hash table */
1267 h
= tb_phys_hash_func(phys_pc
);
1268 ptb
= &tb_phys_hash
[h
];
1269 tb
->phys_hash_next
= *ptb
;
1272 /* add in the page list */
1273 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1274 if (phys_page2
!= -1)
1275 tb_alloc_page(tb
, 1, phys_page2
);
1277 tb
->page_addr
[1] = -1;
1279 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1280 tb
->jmp_next
[0] = NULL
;
1281 tb
->jmp_next
[1] = NULL
;
1283 /* init original jump addresses */
1284 if (tb
->tb_next_offset
[0] != 0xffff)
1285 tb_reset_jump(tb
, 0);
1286 if (tb
->tb_next_offset
[1] != 0xffff)
1287 tb_reset_jump(tb
, 1);
1289 #ifdef DEBUG_TB_CHECK
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
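
/* Note: the binary search works because TBs are allocated sequentially
   from code_gen_buffer, so tbs[0..nb_tbs-1] is sorted by tc_ptr. */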
1326 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1328 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1330 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1333 tb1
= tb
->jmp_next
[n
];
1335 /* find head of list */
1338 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1341 tb1
= tb1
->jmp_next
[n1
];
    /* we are now sure that tb jumps to tb1 */
1346 /* remove tb from the jmp_first list */
1347 ptb
= &tb_next
->jmp_first
;
1351 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1352 if (n1
== n
&& tb1
== tb
)
1354 ptb
= &tb1
->jmp_next
[n1
];
1356 *ptb
= tb
->jmp_next
[n
];
1357 tb
->jmp_next
[n
] = NULL
;
1359 /* suppress the jump to next tb in generated code */
1360 tb_reset_jump(tb
, n
);
1362 /* suppress jumps in the tb on which we could have jumped */
1363 tb_reset_jump_recursive(tb_next
);
1367 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1369 tb_reset_jump_recursive2(tb
, 0);
1370 tb_reset_jump_recursive2(tb
, 1);
1373 #if defined(TARGET_HAS_ICE)
1374 #if defined(CONFIG_USER_ONLY)
1375 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1377 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1380 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1382 target_phys_addr_t addr
;
1384 ram_addr_t ram_addr
;
1387 addr
= cpu_get_phys_page_debug(env
, pc
);
1388 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1390 pd
= IO_MEM_UNASSIGNED
;
1392 pd
= p
->phys_offset
;
1394 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1395 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1398 #endif /* TARGET_HAS_ICE */
1400 #if defined(CONFIG_USER_ONLY)
1401 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1406 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1407 int flags
, CPUWatchpoint
**watchpoint
)
1412 /* Add a watchpoint. */
1413 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1414 int flags
, CPUWatchpoint
**watchpoint
)
1416 target_ulong len_mask
= ~(len
- 1);
1419 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1420 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1421 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1422 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1425 wp
= qemu_malloc(sizeof(*wp
));
1428 wp
->len_mask
= len_mask
;
1431 /* keep all GDB-injected watchpoints in front */
1433 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1435 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1437 tlb_flush_page(env
, addr
);
1444 /* Remove a specific watchpoint. */
1445 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1448 target_ulong len_mask
= ~(len
- 1);
1451 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1452 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1453 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1454 cpu_watchpoint_remove_by_ref(env
, wp
);
1461 /* Remove a specific watchpoint by reference. */
1462 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1464 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1466 tlb_flush_page(env
, watchpoint
->vaddr
);
1468 qemu_free(watchpoint
);
1471 /* Remove all matching watchpoints. */
1472 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1474 CPUWatchpoint
*wp
, *next
;
1476 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1477 if (wp
->flags
& mask
)
1478 cpu_watchpoint_remove_by_ref(env
, wp
);
1483 /* Add a breakpoint. */
1484 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1485 CPUBreakpoint
**breakpoint
)
1487 #if defined(TARGET_HAS_ICE)
1490 bp
= qemu_malloc(sizeof(*bp
));
1495 /* keep all GDB-injected breakpoints in front */
1497 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1499 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1501 breakpoint_invalidate(env
, pc
);
1511 /* Remove a specific breakpoint. */
1512 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1514 #if defined(TARGET_HAS_ICE)
1517 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1518 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1519 cpu_breakpoint_remove_by_ref(env
, bp
);
1529 /* Remove a specific breakpoint by reference. */
1530 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1532 #if defined(TARGET_HAS_ICE)
1533 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1535 breakpoint_invalidate(env
, breakpoint
->pc
);
1537 qemu_free(breakpoint
);
1541 /* Remove all matching breakpoints. */
1542 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1544 #if defined(TARGET_HAS_ICE)
1545 CPUBreakpoint
*bp
, *next
;
1547 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1548 if (bp
->flags
& mask
)
1549 cpu_breakpoint_remove_by_ref(env
, bp
);
1554 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1555 CPU loop after each instruction */
1556 void cpu_single_step(CPUState
*env
, int enabled
)
1558 #if defined(TARGET_HAS_ICE)
1559 if (env
->singlestep_enabled
!= enabled
) {
1560 env
->singlestep_enabled
= enabled
;
1562 kvm_update_guest_debug(env
, 0);
1564 /* must flush all the translated code to avoid inconsistencies */
1565 /* XXX: only flush what is necessary */
1572 /* enable or disable low levels log */
1573 void cpu_set_log(int log_flags
)
1575 loglevel
= log_flags
;
1576 if (loglevel
&& !logfile
) {
1577 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1579 perror(logfilename
);
1582 #if !defined(CONFIG_SOFTMMU)
1583 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1585 static char logfile_buf
[4096];
1586 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1588 #elif !defined(_WIN32)
1589 /* Win32 doesn't support line-buffering and requires size >= 2 */
1590 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1594 if (!loglevel
&& logfile
) {
1600 void cpu_set_log_filename(const char *filename
)
1602 logfilename
= strdup(filename
);
1607 cpu_set_log(loglevel
);
1610 static void cpu_unlink_tb(CPUState
*env
)
1612 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1613 problem and hope the cpu will stop of its own accord. For userspace
1614 emulation this often isn't actually as bad as it sounds. Often
1615 signals are used primarily to interrupt blocking syscalls. */
1616 TranslationBlock
*tb
;
1617 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1619 spin_lock(&interrupt_lock
);
1620 tb
= env
->current_tb
;
1621 /* if the cpu is currently executing code, we must unlink it and
1622 all the potentially executing TB */
1624 env
->current_tb
= NULL
;
1625 tb_reset_jump_recursive(tb
);
1627 spin_unlock(&interrupt_lock
);
1630 /* mask must never be zero, except for A20 change call */
1631 void cpu_interrupt(CPUState
*env
, int mask
)
1635 old_mask
= env
->interrupt_request
;
1636 env
->interrupt_request
|= mask
;
1638 #ifndef CONFIG_USER_ONLY
1640 * If called from iothread context, wake the target cpu in
1643 if (!qemu_cpu_is_self(env
)) {
1650 env
->icount_decr
.u16
.high
= 0xffff;
1651 #ifndef CONFIG_USER_ONLY
1653 && (mask
& ~old_mask
) != 0) {
1654 cpu_abort(env
, "Raised interrupt while not in I/O function");
1662 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1664 env
->interrupt_request
&= ~mask
;
1667 void cpu_exit(CPUState
*env
)
1669 env
->exit_request
= 1;
1673 const CPULogItem cpu_log_items
[] = {
1674 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1675 "show generated host assembly code for each compiled TB" },
1676 { CPU_LOG_TB_IN_ASM
, "in_asm",
1677 "show target assembly code for each compiled TB" },
1678 { CPU_LOG_TB_OP
, "op",
1679 "show micro ops for each compiled TB" },
1680 { CPU_LOG_TB_OP_OPT
, "op_opt",
1683 "before eflags optimization and "
1685 "after liveness analysis" },
1686 { CPU_LOG_INT
, "int",
1687 "show interrupts/exceptions in short format" },
1688 { CPU_LOG_EXEC
, "exec",
1689 "show trace before each executed TB (lots of logs)" },
1690 { CPU_LOG_TB_CPU
, "cpu",
1691 "show CPU state before block translation" },
1693 { CPU_LOG_PCALL
, "pcall",
1694 "show protected mode far calls/returns/exceptions" },
1695 { CPU_LOG_RESET
, "cpu_reset",
1696 "show CPU state before CPU resets" },
1699 { CPU_LOG_IOPORT
, "ioport",
1700 "show all i/o ports accesses" },
1705 #ifndef CONFIG_USER_ONLY
1706 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1707 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1709 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1711 ram_addr_t phys_offset
)
1713 CPUPhysMemoryClient
*client
;
1714 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1715 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1719 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1720 target_phys_addr_t end
)
1722 CPUPhysMemoryClient
*client
;
1723 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1724 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1731 static int cpu_notify_migration_log(int enable
)
1733 CPUPhysMemoryClient
*client
;
1734 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1735 int r
= client
->migration_log(client
, enable
);
1742 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1743 int level
, void **lp
)
1751 PhysPageDesc
*pd
= *lp
;
1752 for (i
= 0; i
< L2_SIZE
; ++i
) {
1753 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1754 client
->set_memory(client
, pd
[i
].region_offset
,
1755 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1760 for (i
= 0; i
< L2_SIZE
; ++i
) {
1761 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1766 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1769 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1770 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1775 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1777 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1778 phys_page_for_each(client
);
1781 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1783 QLIST_REMOVE(client
, list
);
1787 static int cmp1(const char *s1
, int n
, const char *s2
)
1789 if (strlen(s2
) != n
)
1791 return memcmp(s1
, s2
, n
) == 0;
1794 /* takes a comma separated list of log masks. Return 0 if error. */
1795 int cpu_str_to_log_mask(const char *str
)
1797 const CPULogItem
*item
;
1804 p1
= strchr(p
, ',');
1807 if(cmp1(p
,p1
-p
,"all")) {
1808 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1812 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1813 if (cmp1(p
, p1
- p
, item
->name
))
1827 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1834 fprintf(stderr
, "qemu: fatal: ");
1835 vfprintf(stderr
, fmt
, ap
);
1836 fprintf(stderr
, "\n");
1838 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1840 cpu_dump_state(env
, stderr
, fprintf
, 0);
1842 if (qemu_log_enabled()) {
1843 qemu_log("qemu: fatal: ");
1844 qemu_log_vprintf(fmt
, ap2
);
1847 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1849 log_cpu_state(env
, 0);
1856 #if defined(CONFIG_USER_ONLY)
1858 struct sigaction act
;
1859 sigfillset(&act
.sa_mask
);
1860 act
.sa_handler
= SIG_DFL
;
1861 sigaction(SIGABRT
, &act
, NULL
);
1867 CPUState
*cpu_copy(CPUState
*env
)
1869 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1870 CPUState
*next_cpu
= new_env
->next_cpu
;
1871 int cpu_index
= new_env
->cpu_index
;
1872 #if defined(TARGET_HAS_ICE)
1877 memcpy(new_env
, env
, sizeof(CPUState
));
1879 /* Preserve chaining and index. */
1880 new_env
->next_cpu
= next_cpu
;
1881 new_env
->cpu_index
= cpu_index
;
1883 /* Clone all break/watchpoints.
1884 Note: Once we support ptrace with hw-debug register access, make sure
1885 BP_CPU break/watchpoints are handled correctly on clone. */
1886 QTAILQ_INIT(&env
->breakpoints
);
1887 QTAILQ_INIT(&env
->watchpoints
);
1888 #if defined(TARGET_HAS_ICE)
1889 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1890 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1892 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1893 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1901 #if !defined(CONFIG_USER_ONLY)
1903 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1907 /* Discard jump cache entries for any tb which might potentially
1908 overlap the flushed page. */
1909 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1910 memset (&env
->tb_jmp_cache
[i
], 0,
1911 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1913 i
= tb_jmp_cache_hash_page(addr
);
1914 memset (&env
->tb_jmp_cache
[i
], 0,
1915 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1918 static CPUTLBEntry s_cputlb_empty_entry
= {
1925 /* NOTE: if flush_global is true, also flush global entries (not
1927 void tlb_flush(CPUState
*env
, int flush_global
)
1931 #if defined(DEBUG_TLB)
1932 printf("tlb_flush:\n");
1934 /* must reset current TB so that interrupts cannot modify the
1935 links while we are modifying them */
1936 env
->current_tb
= NULL
;
1938 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1940 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1941 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1945 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1947 env
->tlb_flush_addr
= -1;
1948 env
->tlb_flush_mask
= 0;
1952 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1954 if (addr
== (tlb_entry
->addr_read
&
1955 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1956 addr
== (tlb_entry
->addr_write
&
1957 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1958 addr
== (tlb_entry
->addr_code
&
1959 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1960 *tlb_entry
= s_cputlb_empty_entry
;
1964 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1969 #if defined(DEBUG_TLB)
1970 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1972 /* Check if we need to flush due to large pages. */
1973 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1974 #if defined(DEBUG_TLB)
1975 printf("tlb_flush_page: forced full flush ("
1976 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1977 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1982 /* must reset current TB so that interrupts cannot modify the
1983 links while we are modifying them */
1984 env
->current_tb
= NULL
;
1986 addr
&= TARGET_PAGE_MASK
;
1987 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1988 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1989 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1991 tlb_flush_jmp_cache(env
, addr
);
1994 /* update the TLBs so that writes to code in the virtual page 'addr'
1996 static void tlb_protect_code(ram_addr_t ram_addr
)
1998 cpu_physical_memory_reset_dirty(ram_addr
,
1999 ram_addr
+ TARGET_PAGE_SIZE
,
2003 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2004 tested for self modifying code */
2005 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2008 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2011 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2012 unsigned long start
, unsigned long length
)
2015 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2016 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2017 if ((addr
- start
) < length
) {
2018 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2023 /* Note: start and end must be within the same ram block. */
2024 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2028 unsigned long length
, start1
;
2031 start
&= TARGET_PAGE_MASK
;
2032 end
= TARGET_PAGE_ALIGN(end
);
2034 length
= end
- start
;
2037 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2039 /* we modify the TLB cache so that the dirty bit will be set again
2040 when accessing the range */
2041 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
2044 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2045 != (end
- 1) - start
) {
2049 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2051 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2052 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2053 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2059 int cpu_physical_memory_set_dirty_tracking(int enable
)
2062 in_migration
= enable
;
2063 ret
= cpu_notify_migration_log(!!enable
);
2067 int cpu_physical_memory_get_dirty_tracking(void)
2069 return in_migration
;
2072 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2073 target_phys_addr_t end_addr
)
2077 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2081 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2084 CPUPhysMemoryClient
*client
;
2085 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2086 if (client
->log_start
) {
2087 int r
= client
->log_start(client
, start_addr
, size
);
2096 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2099 CPUPhysMemoryClient
*client
;
2100 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2101 if (client
->log_stop
) {
2102 int r
= client
->log_stop(client
, start_addr
, size
);
2111 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2113 ram_addr_t ram_addr
;
2116 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2117 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2118 + tlb_entry
->addend
);
2119 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2120 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2121 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2126 /* update the TLB according to the current state of the dirty bits */
2127 void cpu_tlb_update_dirty(CPUState
*env
)
2131 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2132 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2133 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2137 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2139 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2140 tlb_entry
->addr_write
= vaddr
;
2143 /* update the TLB corresponding to virtual page vaddr
2144 so that it is no longer dirty */
2145 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2150 vaddr
&= TARGET_PAGE_MASK
;
2151 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2152 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2153 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        /* No previous large page.  */
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
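
/* Illustrative example of the mask extension (values are made up): if
   the recorded region is 0x40000000/0xffe00000 (a 2 MB page) and a
   2 MB page at 0x40400000 is added, the loop widens the mask until
   both addresses match, leaving 0x40000000/0xff800000. */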
2179 /* Add a new TLB entry. At most one entry for a given virtual address
2180 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2181 supplied size is only used by tlb_flush_page. */
2182 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2183 target_phys_addr_t paddr
, int prot
,
2184 int mmu_idx
, target_ulong size
)
2189 target_ulong address
;
2190 target_ulong code_address
;
2191 unsigned long addend
;
2194 target_phys_addr_t iotlb
;
2196 assert(size
>= TARGET_PAGE_SIZE
);
2197 if (size
!= TARGET_PAGE_SIZE
) {
2198 tlb_add_large_page(env
, vaddr
, size
);
2200 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2202 pd
= IO_MEM_UNASSIGNED
;
2204 pd
= p
->phys_offset
;
2206 #if defined(DEBUG_TLB)
2207 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2208 " prot=%x idx=%d pd=0x%08lx\n",
2209 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2213 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2214 /* IO memory case (romd handled later) */
2215 address
|= TLB_MMIO
;
2217 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2218 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2220 iotlb
= pd
& TARGET_PAGE_MASK
;
2221 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2222 iotlb
|= IO_MEM_NOTDIRTY
;
2224 iotlb
|= IO_MEM_ROM
;
2226 /* IO handlers are currently passed a physical address.
2227 It would be nice to pass an offset from the base address
2228 of that region. This would avoid having to special case RAM,
2229 and avoid full address decoding in every device.
2230 We can't use the high bits of pd for this because
2231 IO_MEM_ROMD uses these as a ram address. */
2232 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2234 iotlb
+= p
->region_offset
;
2240 code_address
= address
;
2241 /* Make accesses to pages with watchpoints go via the
2242 watchpoint trap routines. */
2243 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2244 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2245 /* Avoid trapping reads of pages with a write breakpoint. */
2246 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2247 iotlb
= io_mem_watch
+ paddr
;
2248 address
|= TLB_MMIO
;
2254 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2255 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2256 te
= &env
->tlb_table
[mmu_idx
][index
];
2257 te
->addend
= addend
- vaddr
;
2258 if (prot
& PAGE_READ
) {
2259 te
->addr_read
= address
;
2264 if (prot
& PAGE_EXEC
) {
2265 te
->addr_code
= code_address
;
2269 if (prot
& PAGE_WRITE
) {
2270 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2271 (pd
& IO_MEM_ROMD
)) {
2272 /* Write access calls the I/O callback. */
2273 te
->addr_write
= address
| TLB_MMIO
;
2274 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2275 !cpu_physical_memory_is_dirty(pd
)) {
2276 te
->addr_write
= address
| TLB_NOTDIRTY
;
2278 te
->addr_write
= address
;
2281 te
->addr_write
= -1;
2287 void tlb_flush(CPUState
*env
, int flush_global
)
2291 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2296 * Walks guest process memory "regions" one by one
2297 * and calls callback function 'fn' for each region.
2300 struct walk_memory_regions_data
2302 walk_memory_regions_fn fn
;
2304 unsigned long start
;
2308 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2309 abi_ulong end
, int new_prot
)
2311 if (data
->start
!= -1ul) {
2312 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2318 data
->start
= (new_prot
? end
: -1ul);
2319 data
->prot
= new_prot
;
2324 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2325 abi_ulong base
, int level
, void **lp
)
2331 return walk_memory_regions_end(data
, base
, 0);
2336 for (i
= 0; i
< L2_SIZE
; ++i
) {
2337 int prot
= pd
[i
].flags
;
2339 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2340 if (prot
!= data
->prot
) {
2341 rc
= walk_memory_regions_end(data
, pa
, prot
);
2349 for (i
= 0; i
< L2_SIZE
; ++i
) {
2350 pa
= base
| ((abi_ulong
)i
<<
2351 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2352 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2362 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2364 struct walk_memory_regions_data data
;
2372 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2373 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2374 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2380 return walk_memory_regions_end(&data
, 0, 0);
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
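
/* page_dump() is itself an example of how to use walk_memory_regions():
   pass an opaque pointer (here the FILE *) and a callback that receives
   each maximal run of pages sharing the same protection flags, e.g.
   page_dump(stderr). */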
2406 int page_get_flags(target_ulong address
)
2410 p
= page_find(address
>> TARGET_PAGE_BITS
);
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
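
/* Illustrative sketch (not part of the original file): a user-mode caller
   holding mmap_lock could revoke write access on one guest page and then
   query the result with page_get_flags().  "guest_addr" is a hypothetical
   page-aligned guest virtual address; PAGE_VALID is kept set so the page
   stays mapped. */
#if 0
static void make_page_read_only(target_ulong guest_addr)
{
    int flags = page_get_flags(guest_addr);

    page_set_flags(guest_addr, guest_addr + TARGET_PAGE_SIZE,
                   (flags | PAGE_VALID | PAGE_READ) & ~PAGE_WRITE);

    assert(!(page_get_flags(guest_addr) & PAGE_WRITE));
}
#endif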
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
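
/* Illustrative sketch (not part of the original file): a board or device
   model usually allocates a RAM block and maps it into the physical address
   space through cpu_register_physical_memory(), the region_offset == 0
   convenience wrapper (declared in the public headers) around the function
   above.  The name and base address below are made up for the example. */
#if 0
static void map_example_ram(void)
{
    ram_addr_t ram_offset;

    /* 64 KB of RAM, tracked in ram_list under the name "example.ram" */
    ram_offset = qemu_ram_alloc(NULL, "example.ram", 64 * 1024);

    /* make it guest-visible at physical address 0x10000000 */
    cpu_register_physical_memory(0x10000000, 64 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif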
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
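
/* Illustrative sketch (not part of the original file): a device model that
   already owns a host buffer (for example one obtained from mmap) can hand
   it to the allocator instead of letting QEMU allocate the memory itself.
   The buffer, size and block name below are made up for the example. */
#if 0
static ram_addr_t register_prealloc_ram(void *host_buf, ram_addr_t size)
{
    /* NULL DeviceState: the block is simply named "example.vram" */
    return qemu_ram_alloc_from_ptr(NULL, "example.vram", size, host_buf);
}
#endif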
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}
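
/* Illustrative sketch (not part of the original file): the two lookups above
   are inverses of each other, so a ram_addr_t can be converted to a host
   pointer and back.  "offset" is a hypothetical value returned earlier by
   qemu_ram_alloc(). */
#if 0
static void ram_addr_round_trip(ram_addr_t offset)
{
    void *host = qemu_safe_ram_ptr(offset);   /* does not reorder ram_list */
    ram_addr_t again;

    if (qemu_ram_addr_from_host(host, &again) == 0) {
        assert(again == offset);
    }
}
#endif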
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
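
/* Illustrative sketch (not part of the original file): a minimal device that
   registers one page of MMIO.  Only the 32-bit handlers are provided; the
   byte and word slots are left NULL so they fall back to the unassigned
   handlers installed above.  The handler names and the base address are made
   up, and cpu_register_physical_memory() is assumed to be the public wrapper
   around cpu_register_physical_memory_offset(). */
#if 0
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;            /* "addr" is the offset within the region */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* stash "val" somewhere in the device state pointed to by opaque */
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};
static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_mmio_map(void)
{
    int io_index = cpu_register_io_memory(example_mmio_read,
                                          example_mmio_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io_index);
}
#endif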
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
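
/* Illustrative sketch (not part of the original file): callers typically use
   the cpu_physical_memory_read()/write() convenience wrappers (used elsewhere
   in this file), which invoke the function above with is_write 0 or 1.
   "paddr" is a hypothetical guest physical address backed by plain RAM. */
#if 0
static void poke_and_peek(target_phys_addr_t paddr)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_write(paddr, out, sizeof(out));
    cpu_physical_memory_read(paddr, in, sizeof(in));
    /* for plain RAM the two buffers now hold the same bytes */
}
#endif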
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
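
/* Illustrative sketch (not part of the original file): the map/unmap pair is
   the zero-copy path a DMA-capable device model would use.  If the region is
   plain RAM the returned pointer aliases guest memory; otherwise the bounce
   buffer is used and only one such mapping can be outstanding at a time. */
#if 0
static void dma_write_example(target_phys_addr_t paddr, const void *src,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(paddr, &plen, 1 /* is_write */);

    if (!host) {
        return;              /* resources exhausted: retry via a map client */
    }
    memcpy(host, src, plen);                       /* plen may be < size */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif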
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
)
4059 cpu_physical_memory_read(addr
, &val
, 1);
4063 /* warning: addr must be aligned */
4064 uint32_t lduw_phys(target_phys_addr_t addr
)
4072 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4074 pd
= IO_MEM_UNASSIGNED
;
4076 pd
= p
->phys_offset
;
4079 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4080 !(pd
& IO_MEM_ROMD
)) {
4082 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4084 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4085 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
4088 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4089 (addr
& ~TARGET_PAGE_MASK
);
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
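
/* Illustrative sketch (not part of the original file): the ld*_phys/st*_phys
   helpers are the aligned single-value counterparts of
   cpu_physical_memory_rw().  A device model could update one 32-bit word of
   an in-memory descriptor like this ("desc_paddr" is a made-up address): */
#if 0
static void set_status_bit(target_phys_addr_t desc_paddr)
{
    uint32_t status = ldl_phys(desc_paddr);

    stl_phys(desc_paddr, status | 0x1);   /* mark the descriptor as done */
}
#endif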
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"