/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
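/* Worked example (editor's illustration, not in the original source):
   with TARGET_PAGE_BITS = 12, L2_BITS = 10 and a 32-bit
   L1_MAP_ADDR_SPACE_BITS, 32 - 12 = 20 bits index pages;
   V_L1_BITS_REM = 20 % 10 = 0 < 4, so V_L1_BITS is widened to 10.
   That gives one 1024-entry L1 table over one 1024-entry L2 level,
   and V_L1_SHIFT = 32 - 12 - 10 = 10. */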
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];
static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
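/* Worked example (editor's illustration, not in the original source):
   with 4 KiB host pages, map_exec(addr = 0x1234, size = 0x100) rounds
   start down to 0x1000 and end up to 0x2000, so the whole containing
   page becomes readable, writable and executable. */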
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
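/* Worked example (editor's illustration, not in the original source):
   the lookup is a radix-tree walk.  With V_L1_SHIFT = 10 and
   L2_BITS = 10, page index 0x12345 selects l1_map[0x12345 >> 10] =
   l1_map[0x48]; the intermediate loop is skipped (V_L1_SHIFT / L2_BITS
   - 1 = 0) and the PageDesc is entry 0x12345 & 0x3ff = 0x345 of the
   bottom-level array. */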
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
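/* Editor's note (not in the original source): a freshly allocated
   bottom-level block marks every page IO_MEM_UNASSIGNED and seeds
   region_offset with the page's own physical address, so an access to
   an unassigned page still hands a meaningful offset to the I/O
   callbacks. */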
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
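/* Worked example (editor's illustration, not in the original source):
   in the system-mode default path a guest with ram_size = 512 MiB gets
   a 128 MiB buffer (ram_size / 4); the host-specific clamps above may
   then shrink it, e.g. to 16 MiB on ARM hosts so that generated blocks
   stay within direct-branch range of each other. */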
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
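/* Editor's note (not in the original source): these jump lists use
   tagged pointers.  The low two bits of a link record which jump slot
   (0 or 1) of the source TB the link belongs to, and the value 2 marks
   the circular list's head (jmp_first); "(long)tb1 & 3" recovers the
   tag and "& ~3" the real pointer, which is safe because
   TranslationBlock structures are at least 4-byte aligned. */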
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
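/* Editor's note (not in the original source): invalidation touches
   four structures -- the physical-PC hash bucket, the per-page TB
   lists, every CPU's tb_jmp_cache slot, and the incoming/outgoing jump
   chains -- so no stale lookup path can reach the dead TB. */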
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
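/* Worked example (editor's illustration, not in the original source):
   set_bits(tab, 5, 7) marks bits 5..11.  end = 12 lies in the next
   byte, so tab[0] gets mask 0xff << 5 = 0xe0 (bits 5-7) and tab[1]
   gets ~(0xff << (12 & 7)) = 0x0f (bits 8-11). */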
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
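/* Worked example (editor's illustration, not in the original source):
   for a 2-byte write at page offset 0x41, b = code_bitmap[0x41 >> 3]
   >> (0x41 & 7) = code_bitmap[8] >> 1, and the test against
   (1 << 2) - 1 = 3 asks whether translated code covers offset 0x41 or
   0x42; only then is the full invalidation path taken. */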
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
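/* Editor's note (not in the original source): the binary search is
   valid because TBs are allocated sequentially from tbs[] while
   code_gen_ptr only grows, so tc_ptr values increase monotonically
   across the array; the final m_max names the last TB whose code
   starts at or before the searched tc_ptr. */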
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
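/* Editor's note (not in the original source): two hash ranges are
   cleared because a TB starting on the page just before 'addr' can
   extend into the flushed page; its cache slot is derived from its
   starting PC, so the bucket block of the preceding page must be
   dropped as well. */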
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
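/* Editor's note (not in the original source): setting TLB_NOTDIRTY
   forces the next guest store to that page through the slow
   IO_MEM_NOTDIRTY path, which can re-mark the page dirty before the
   fast direct-RAM mapping is restored. */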
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
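/* Worked example (editor's illustration, not in the original source):
   with an existing 2 MiB region at 0x00200000 and a new large page at
   0x00600000, the loop shifts the mask left twice until both addresses
   agree under it, leaving one 8 MiB-aligned covering region (base
   0x00000000) instead of a per-page list. */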
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
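/* Worked example (editor's illustration, not in the original source):
   the walk coalesces runs of pages with identical flags.  If pages
   0x10000..0x14fff share PAGE_READ | PAGE_EXEC and the flags change at
   0x15000, walk_memory_regions_end() fires 'fn' once for the whole run
   at that boundary rather than once per page. */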
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
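/* Illustrative sketch (not part of the build): callers normally reach this
 * through the cpu_register_physical_memory() compatibility wrapper from
 * exec-obsolete.h, passing either a RAM offset from qemu_ram_alloc() or an
 * I/O token from cpu_register_io_memory().  The address and size below are
 * placeholders.
 */
#if 0
static void example_register_ram(ram_addr_t ram_offset)
{
    /* map 1MB of RAM at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 0x100000,
                                 ram_offset | IO_MEM_RAM);
}
#endif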
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
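/* Illustrative sketch (not part of the build): a device model would
 * allocate its RAM block, then take a host pointer only to initialise the
 * memory it owns.  The region name, size and function name are
 * placeholders.
 */
#if 0
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(0x800000, mr); /* 8MB of video RAM */
    void *host = qemu_get_ram_ptr(offset);

    memset(host, 0, 0x800000);   /* clear it before the guest sees it */
    qemu_put_ram_ptr(host);
    return offset;
}
#endif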
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
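/* The notdirty_mem_write* handlers above implement self-modifying-code
 * detection for the softmmu slow path: a write to a page that still holds
 * translated code first invalidates the overlapping TBs, then performs the
 * store and sets the dirty bits.  Once dirty_flags reaches 0xff no
 * translated code is left on the page, so tlb_set_dirty() switches the TLB
 * entry back to the fast RAM path.
 */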
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
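/* Illustrative sketch (not part of the build): a device registers its MMIO
 * callbacks and maps the returned token at a guest physical address.  All
 * names prefixed "example_" and the address are hypothetical.
 */
#if 0
static CPUReadMemoryFunc * const example_read[3] = {
    example_readb, example_readw, example_readl,
};
static CPUWriteMemoryFunc * const example_write[3] = {
    example_writeb, example_writew, example_writel,
};

static void example_init_mmio(void *dev_state)
{
    int io = cpu_register_io_memory(example_read, example_write, dev_state);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
}
#endif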
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3774 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3775 const uint8_t *buf
, int len
)
3779 target_phys_addr_t page
;
3784 page
= addr
& TARGET_PAGE_MASK
;
3785 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3788 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3790 pd
= IO_MEM_UNASSIGNED
;
3792 pd
= p
->phys_offset
;
3795 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
3796 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
3797 !(pd
& IO_MEM_ROMD
)) {
3800 unsigned long addr1
;
3801 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3803 ptr
= qemu_get_ram_ptr(addr1
);
3804 memcpy(ptr
, buf
, l
);
3805 qemu_put_ram_ptr(ptr
);
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
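/* Illustrative sketch (not part of the build): the usual DMA pattern is
 * map, copy, unmap, retrying later if the single bounce buffer is busy.
 * The function name is hypothetical.
 */
#if 0
static void example_dma_read(target_phys_addr_t addr, uint8_t *dst,
                             target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *p = cpu_physical_memory_map(addr, &plen, 0 /* read */);

    if (!p) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    memcpy(dst, p, plen);
    cpu_physical_memory_unmap(p, plen, 0, plen);
}
#endif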
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
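/* Illustrative note: device models pick the accessor matching the device's
 * declared endianness rather than byte-swapping by hand; e.g. a register of
 * a little-endian device would be read with ldl_le_phys(addr) and written
 * with stl_le_phys(addr, val), regardless of TARGET_WORDS_BIGENDIAN.
 */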
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif