2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
63 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock
*tbs
;
83 static int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue
[1024] code_gen_section
;
106 static uint8_t *code_gen_buffer
;
107 static unsigned long code_gen_buffer_size
;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size
;
110 static uint8_t *code_gen_ptr
;
112 #if !defined(CONFIG_USER_ONLY)
114 static int in_migration
;
116 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
118 static MemoryRegion
*system_memory
;
119 static MemoryRegion
*system_io
;
121 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
126 /* current CPU in the current thread. It is only valid inside
128 DEFINE_TLS(CPUState
*,cpu_single_env
);
129 /* 0 = Do not count executed instructions.
130 1 = Precise instruction counting.
131 2 = Adaptive rate instruction counting. */
134 typedef struct PageDesc
{
135 /* list of TBs intersecting this ram page */
136 TranslationBlock
*first_tb
;
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count
;
140 uint8_t *code_bitmap
;
141 #if defined(CONFIG_USER_ONLY)
146 /* In system mode we want L1_MAP to be based on ram offsets,
147 while in user mode we want it to be based on virtual addresses. */
148 #if !defined(CONFIG_USER_ONLY)
149 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
155 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
158 /* Size of the L2 (and L3, etc) page tables. */
160 #define L2_SIZE (1 << L2_BITS)
162 /* The bits remaining after N lower levels of page tables. */
163 #define P_L1_BITS_REM \
164 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165 #define V_L1_BITS_REM \
166 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 /* Size of the L1 page table. Avoid silly small sizes. */
169 #if P_L1_BITS_REM < 4
170 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
172 #define P_L1_BITS P_L1_BITS_REM
175 #if V_L1_BITS_REM < 4
176 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
178 #define V_L1_BITS V_L1_BITS_REM
181 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
182 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
184 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
185 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
187 unsigned long qemu_real_host_page_size
;
188 unsigned long qemu_host_page_size
;
189 unsigned long qemu_host_page_mask
;
191 /* This is a multi-level map on the virtual address space.
192 The bottom level has pointers to PageDesc. */
193 static void *l1_map
[V_L1_SIZE
];
195 #if !defined(CONFIG_USER_ONLY)
196 typedef struct PhysPageDesc
{
197 /* offset in host memory of the page + io_index in the low bits */
198 ram_addr_t phys_offset
;
199 ram_addr_t region_offset
;
202 /* This is a multi-level map on the physical address space.
203 The bottom level has pointers to PhysPageDesc. */
204 static void *l1_phys_map
[P_L1_SIZE
];
206 static void io_mem_init(void);
207 static void memory_map_init(void);
209 /* io memory support */
210 CPUWriteMemoryFunc
*_io_mem_write
[IO_MEM_NB_ENTRIES
][4];
211 CPUReadMemoryFunc
*_io_mem_read
[IO_MEM_NB_ENTRIES
][4];
212 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
213 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
214 static int io_mem_watch
;
219 static const char *logfilename
= "qemu.log";
221 static const char *logfilename
= "/tmp/qemu.log";
225 static int log_append
= 0;
228 #if !defined(CONFIG_USER_ONLY)
229 static int tlb_flush_count
;
231 static int tb_flush_count
;
232 static int tb_phys_invalidate_count
;
235 static void map_exec(void *addr
, long size
)
238 VirtualProtect(addr
, size
,
239 PAGE_EXECUTE_READWRITE
, &old_protect
);
243 static void map_exec(void *addr
, long size
)
245 unsigned long start
, end
, page_size
;
247 page_size
= getpagesize();
248 start
= (unsigned long)addr
;
249 start
&= ~(page_size
- 1);
251 end
= (unsigned long)addr
+ size
;
252 end
+= page_size
- 1;
253 end
&= ~(page_size
- 1);
255 mprotect((void *)start
, end
- start
,
256 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
260 static void page_init(void)
262 /* NOTE: we can always suppose that qemu_host_page_size >=
266 SYSTEM_INFO system_info
;
268 GetSystemInfo(&system_info
);
269 qemu_real_host_page_size
= system_info
.dwPageSize
;
272 qemu_real_host_page_size
= getpagesize();
274 if (qemu_host_page_size
== 0)
275 qemu_host_page_size
= qemu_real_host_page_size
;
276 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
277 qemu_host_page_size
= TARGET_PAGE_SIZE
;
278 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
280 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
282 #ifdef HAVE_KINFO_GETVMMAP
283 struct kinfo_vmentry
*freep
;
286 freep
= kinfo_getvmmap(getpid(), &cnt
);
289 for (i
= 0; i
< cnt
; i
++) {
290 unsigned long startaddr
, endaddr
;
292 startaddr
= freep
[i
].kve_start
;
293 endaddr
= freep
[i
].kve_end
;
294 if (h2g_valid(startaddr
)) {
295 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
297 if (h2g_valid(endaddr
)) {
298 endaddr
= h2g(endaddr
);
299 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
301 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
303 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
314 last_brk
= (unsigned long)sbrk(0);
316 f
= fopen("/compat/linux/proc/self/maps", "r");
321 unsigned long startaddr
, endaddr
;
324 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
326 if (n
== 2 && h2g_valid(startaddr
)) {
327 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
329 if (h2g_valid(endaddr
)) {
330 endaddr
= h2g(endaddr
);
334 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
346 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
352 #if defined(CONFIG_USER_ONLY)
353 /* We can't use g_malloc because it may recurse into a locked mutex. */
354 # define ALLOC(P, SIZE) \
356 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
357 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
360 # define ALLOC(P, SIZE) \
361 do { P = g_malloc0(SIZE); } while (0)
364 /* Level 1. Always allocated. */
365 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
368 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
375 ALLOC(p
, sizeof(void *) * L2_SIZE
);
379 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
387 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
393 return pd
+ (index
& (L2_SIZE
- 1));
396 static inline PageDesc
*page_find(tb_page_addr_t index
)
398 return page_find_alloc(index
, 0);
401 #if !defined(CONFIG_USER_ONLY)
402 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
408 /* Level 1. Always allocated. */
409 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
412 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
418 *lp
= p
= g_malloc0(sizeof(void *) * L2_SIZE
);
420 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
426 int first_index
= index
& ~(L2_SIZE
- 1);
432 *lp
= pd
= g_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
434 for (i
= 0; i
< L2_SIZE
; i
++) {
435 pd
[i
].phys_offset
= io_mem_unassigned
.ram_addr
;
436 pd
[i
].region_offset
= (first_index
+ i
) << TARGET_PAGE_BITS
;
440 return pd
+ (index
& (L2_SIZE
- 1));
443 static inline PhysPageDesc
phys_page_find(target_phys_addr_t index
)
445 PhysPageDesc
*p
= phys_page_find_alloc(index
, 0);
450 return (PhysPageDesc
) {
451 .phys_offset
= io_mem_unassigned
.ram_addr
,
452 .region_offset
= index
<< TARGET_PAGE_BITS
,
457 static void tlb_protect_code(ram_addr_t ram_addr
);
458 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
460 #define mmap_lock() do { } while(0)
461 #define mmap_unlock() do { } while(0)
464 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
466 #if defined(CONFIG_USER_ONLY)
467 /* Currently it is not recommended to allocate big chunks of data in
468 user mode. It will change when a dedicated libc will be used */
469 #define USE_STATIC_CODE_GEN_BUFFER
472 #ifdef USE_STATIC_CODE_GEN_BUFFER
473 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
474 __attribute__((aligned (CODE_GEN_ALIGN
)));
477 static void code_gen_alloc(unsigned long tb_size
)
479 #ifdef USE_STATIC_CODE_GEN_BUFFER
480 code_gen_buffer
= static_code_gen_buffer
;
481 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
482 map_exec(code_gen_buffer
, code_gen_buffer_size
);
484 code_gen_buffer_size
= tb_size
;
485 if (code_gen_buffer_size
== 0) {
486 #if defined(CONFIG_USER_ONLY)
487 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
489 /* XXX: needs adjustments */
490 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
493 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
494 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
495 /* The code gen buffer location may have constraints depending on
496 the host cpu and OS */
497 #if defined(__linux__)
502 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
503 #if defined(__x86_64__)
505 /* Cannot map more than that */
506 if (code_gen_buffer_size
> (800 * 1024 * 1024))
507 code_gen_buffer_size
= (800 * 1024 * 1024);
508 #elif defined(__sparc_v9__)
509 // Map the buffer below 2G, so we can use direct calls and branches
511 start
= (void *) 0x60000000UL
;
512 if (code_gen_buffer_size
> (512 * 1024 * 1024))
513 code_gen_buffer_size
= (512 * 1024 * 1024);
514 #elif defined(__arm__)
515 /* Keep the buffer no bigger than 16GB to branch between blocks */
516 if (code_gen_buffer_size
> 16 * 1024 * 1024)
517 code_gen_buffer_size
= 16 * 1024 * 1024;
518 #elif defined(__s390x__)
519 /* Map the buffer so that we can use direct calls and branches. */
520 /* We have a +- 4GB range on the branches; leave some slop. */
521 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
522 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
524 start
= (void *)0x90000000UL
;
526 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
527 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
529 if (code_gen_buffer
== MAP_FAILED
) {
530 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
534 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
535 || defined(__DragonFly__) || defined(__OpenBSD__) \
536 || defined(__NetBSD__)
540 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
541 #if defined(__x86_64__)
542 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
543 * 0x40000000 is free */
545 addr
= (void *)0x40000000;
546 /* Cannot map more than that */
547 if (code_gen_buffer_size
> (800 * 1024 * 1024))
548 code_gen_buffer_size
= (800 * 1024 * 1024);
549 #elif defined(__sparc_v9__)
550 // Map the buffer below 2G, so we can use direct calls and branches
552 addr
= (void *) 0x60000000UL
;
553 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
554 code_gen_buffer_size
= (512 * 1024 * 1024);
557 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
558 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
560 if (code_gen_buffer
== MAP_FAILED
) {
561 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
566 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
567 map_exec(code_gen_buffer
, code_gen_buffer_size
);
569 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
570 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
571 code_gen_buffer_max_size
= code_gen_buffer_size
-
572 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
573 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
574 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
577 /* Must be called before using the QEMU cpus. 'tb_size' is the size
578 (in bytes) allocated to the translation buffer. Zero means default
580 void tcg_exec_init(unsigned long tb_size
)
583 code_gen_alloc(tb_size
);
584 code_gen_ptr
= code_gen_buffer
;
586 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
587 /* There's no guest base to take into account, so go ahead and
588 initialize the prologue now. */
589 tcg_prologue_init(&tcg_ctx
);
593 bool tcg_enabled(void)
595 return code_gen_buffer
!= NULL
;
598 void cpu_exec_init_all(void)
600 #if !defined(CONFIG_USER_ONLY)
606 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
608 static int cpu_common_post_load(void *opaque
, int version_id
)
610 CPUState
*env
= opaque
;
612 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
613 version_id is increased. */
614 env
->interrupt_request
&= ~0x01;
620 static const VMStateDescription vmstate_cpu_common
= {
621 .name
= "cpu_common",
623 .minimum_version_id
= 1,
624 .minimum_version_id_old
= 1,
625 .post_load
= cpu_common_post_load
,
626 .fields
= (VMStateField
[]) {
627 VMSTATE_UINT32(halted
, CPUState
),
628 VMSTATE_UINT32(interrupt_request
, CPUState
),
629 VMSTATE_END_OF_LIST()
634 CPUState
*qemu_get_cpu(int cpu
)
636 CPUState
*env
= first_cpu
;
639 if (env
->cpu_index
== cpu
)
647 void cpu_exec_init(CPUState
*env
)
652 #if defined(CONFIG_USER_ONLY)
655 env
->next_cpu
= NULL
;
658 while (*penv
!= NULL
) {
659 penv
= &(*penv
)->next_cpu
;
662 env
->cpu_index
= cpu_index
;
664 QTAILQ_INIT(&env
->breakpoints
);
665 QTAILQ_INIT(&env
->watchpoints
);
666 #ifndef CONFIG_USER_ONLY
667 env
->thread_id
= qemu_get_thread_id();
670 #if defined(CONFIG_USER_ONLY)
673 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
674 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
675 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
676 cpu_save
, cpu_load
, env
);
680 /* Allocate a new translation block. Flush the translation buffer if
681 too many translation blocks or too much generated code. */
682 static TranslationBlock
*tb_alloc(target_ulong pc
)
684 TranslationBlock
*tb
;
686 if (nb_tbs
>= code_gen_max_blocks
||
687 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
695 void tb_free(TranslationBlock
*tb
)
697 /* In practice this is mostly used for single use temporary TB
698 Ignore the hard cases and just back up if this TB happens to
699 be the last one generated. */
700 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
701 code_gen_ptr
= tb
->tc_ptr
;
706 static inline void invalidate_page_bitmap(PageDesc
*p
)
708 if (p
->code_bitmap
) {
709 g_free(p
->code_bitmap
);
710 p
->code_bitmap
= NULL
;
712 p
->code_write_count
= 0;
715 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
717 static void page_flush_tb_1 (int level
, void **lp
)
726 for (i
= 0; i
< L2_SIZE
; ++i
) {
727 pd
[i
].first_tb
= NULL
;
728 invalidate_page_bitmap(pd
+ i
);
732 for (i
= 0; i
< L2_SIZE
; ++i
) {
733 page_flush_tb_1 (level
- 1, pp
+ i
);
738 static void page_flush_tb(void)
741 for (i
= 0; i
< V_L1_SIZE
; i
++) {
742 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
746 /* flush all the translation blocks */
747 /* XXX: tb_flush is currently not thread safe */
748 void tb_flush(CPUState
*env1
)
751 #if defined(DEBUG_FLUSH)
752 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
753 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
755 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
757 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
758 cpu_abort(env1
, "Internal error: code buffer overflow\n");
762 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
763 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
766 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
769 code_gen_ptr
= code_gen_buffer
;
770 /* XXX: flush processor icache at this point if cache flush is
775 #ifdef DEBUG_TB_CHECK
777 static void tb_invalidate_check(target_ulong address
)
779 TranslationBlock
*tb
;
781 address
&= TARGET_PAGE_MASK
;
782 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
783 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
784 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
785 address
>= tb
->pc
+ tb
->size
)) {
786 printf("ERROR invalidate: address=" TARGET_FMT_lx
787 " PC=%08lx size=%04x\n",
788 address
, (long)tb
->pc
, tb
->size
);
794 /* verify that all the pages have correct rights for code */
795 static void tb_page_check(void)
797 TranslationBlock
*tb
;
798 int i
, flags1
, flags2
;
800 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
801 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
802 flags1
= page_get_flags(tb
->pc
);
803 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
804 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
805 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
806 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
814 /* invalidate one TB */
815 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
818 TranslationBlock
*tb1
;
822 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
825 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
829 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
831 TranslationBlock
*tb1
;
837 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
839 *ptb
= tb1
->page_next
[n1
];
842 ptb
= &tb1
->page_next
[n1
];
846 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
848 TranslationBlock
*tb1
, **ptb
;
851 ptb
= &tb
->jmp_next
[n
];
854 /* find tb(n) in circular list */
858 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
859 if (n1
== n
&& tb1
== tb
)
862 ptb
= &tb1
->jmp_first
;
864 ptb
= &tb1
->jmp_next
[n1
];
867 /* now we can suppress tb(n) from the list */
868 *ptb
= tb
->jmp_next
[n
];
870 tb
->jmp_next
[n
] = NULL
;
874 /* reset the jump entry 'n' of a TB so that it is not chained to
876 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
878 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
881 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
886 tb_page_addr_t phys_pc
;
887 TranslationBlock
*tb1
, *tb2
;
889 /* remove the TB from the hash list */
890 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
891 h
= tb_phys_hash_func(phys_pc
);
892 tb_remove(&tb_phys_hash
[h
], tb
,
893 offsetof(TranslationBlock
, phys_hash_next
));
895 /* remove the TB from the page list */
896 if (tb
->page_addr
[0] != page_addr
) {
897 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
898 tb_page_remove(&p
->first_tb
, tb
);
899 invalidate_page_bitmap(p
);
901 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
902 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
903 tb_page_remove(&p
->first_tb
, tb
);
904 invalidate_page_bitmap(p
);
907 tb_invalidated_flag
= 1;
909 /* remove the TB from the hash list */
910 h
= tb_jmp_cache_hash_func(tb
->pc
);
911 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
912 if (env
->tb_jmp_cache
[h
] == tb
)
913 env
->tb_jmp_cache
[h
] = NULL
;
916 /* suppress this TB from the two jump lists */
917 tb_jmp_remove(tb
, 0);
918 tb_jmp_remove(tb
, 1);
920 /* suppress any remaining jumps to this TB */
926 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
927 tb2
= tb1
->jmp_next
[n1
];
928 tb_reset_jump(tb1
, n1
);
929 tb1
->jmp_next
[n1
] = NULL
;
932 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
934 tb_phys_invalidate_count
++;
937 static inline void set_bits(uint8_t *tab
, int start
, int len
)
943 mask
= 0xff << (start
& 7);
944 if ((start
& ~7) == (end
& ~7)) {
946 mask
&= ~(0xff << (end
& 7));
951 start
= (start
+ 8) & ~7;
953 while (start
< end1
) {
958 mask
= ~(0xff << (end
& 7));
964 static void build_page_bitmap(PageDesc
*p
)
966 int n
, tb_start
, tb_end
;
967 TranslationBlock
*tb
;
969 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
974 tb
= (TranslationBlock
*)((long)tb
& ~3);
975 /* NOTE: this is subtle as a TB may span two physical pages */
977 /* NOTE: tb_end may be after the end of the page, but
978 it is not a problem */
979 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
980 tb_end
= tb_start
+ tb
->size
;
981 if (tb_end
> TARGET_PAGE_SIZE
)
982 tb_end
= TARGET_PAGE_SIZE
;
985 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
987 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
988 tb
= tb
->page_next
[n
];
992 TranslationBlock
*tb_gen_code(CPUState
*env
,
993 target_ulong pc
, target_ulong cs_base
,
994 int flags
, int cflags
)
996 TranslationBlock
*tb
;
998 tb_page_addr_t phys_pc
, phys_page2
;
999 target_ulong virt_page2
;
1002 phys_pc
= get_page_addr_code(env
, pc
);
1005 /* flush must be done */
1007 /* cannot fail at this point */
1009 /* Don't forget to invalidate previous TB info. */
1010 tb_invalidated_flag
= 1;
1012 tc_ptr
= code_gen_ptr
;
1013 tb
->tc_ptr
= tc_ptr
;
1014 tb
->cs_base
= cs_base
;
1016 tb
->cflags
= cflags
;
1017 cpu_gen_code(env
, tb
, &code_gen_size
);
1018 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1020 /* check next page if needed */
1021 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1023 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1024 phys_page2
= get_page_addr_code(env
, virt_page2
);
1026 tb_link_page(tb
, phys_pc
, phys_page2
);
1030 /* invalidate all TBs which intersect with the target physical page
1031 starting in range [start;end[. NOTE: start and end must refer to
1032 the same physical page. 'is_cpu_write_access' should be true if called
1033 from a real cpu write access: the virtual CPU will exit the current
1034 TB if code is modified inside this TB. */
1035 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1036 int is_cpu_write_access
)
1038 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1039 CPUState
*env
= cpu_single_env
;
1040 tb_page_addr_t tb_start
, tb_end
;
1043 #ifdef TARGET_HAS_PRECISE_SMC
1044 int current_tb_not_found
= is_cpu_write_access
;
1045 TranslationBlock
*current_tb
= NULL
;
1046 int current_tb_modified
= 0;
1047 target_ulong current_pc
= 0;
1048 target_ulong current_cs_base
= 0;
1049 int current_flags
= 0;
1050 #endif /* TARGET_HAS_PRECISE_SMC */
1052 p
= page_find(start
>> TARGET_PAGE_BITS
);
1055 if (!p
->code_bitmap
&&
1056 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1057 is_cpu_write_access
) {
1058 /* build code bitmap */
1059 build_page_bitmap(p
);
1062 /* we remove all the TBs in the range [start, end[ */
1063 /* XXX: see if in some cases it could be faster to invalidate all the code */
1065 while (tb
!= NULL
) {
1067 tb
= (TranslationBlock
*)((long)tb
& ~3);
1068 tb_next
= tb
->page_next
[n
];
1069 /* NOTE: this is subtle as a TB may span two physical pages */
1071 /* NOTE: tb_end may be after the end of the page, but
1072 it is not a problem */
1073 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1074 tb_end
= tb_start
+ tb
->size
;
1076 tb_start
= tb
->page_addr
[1];
1077 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1079 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1080 #ifdef TARGET_HAS_PRECISE_SMC
1081 if (current_tb_not_found
) {
1082 current_tb_not_found
= 0;
1084 if (env
->mem_io_pc
) {
1085 /* now we have a real cpu fault */
1086 current_tb
= tb_find_pc(env
->mem_io_pc
);
1089 if (current_tb
== tb
&&
1090 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1091 /* If we are modifying the current TB, we must stop
1092 its execution. We could be more precise by checking
1093 that the modification is after the current PC, but it
1094 would require a specialized function to partially
1095 restore the CPU state */
1097 current_tb_modified
= 1;
1098 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1099 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1102 #endif /* TARGET_HAS_PRECISE_SMC */
1103 /* we need to do that to handle the case where a signal
1104 occurs while doing tb_phys_invalidate() */
1107 saved_tb
= env
->current_tb
;
1108 env
->current_tb
= NULL
;
1110 tb_phys_invalidate(tb
, -1);
1112 env
->current_tb
= saved_tb
;
1113 if (env
->interrupt_request
&& env
->current_tb
)
1114 cpu_interrupt(env
, env
->interrupt_request
);
1119 #if !defined(CONFIG_USER_ONLY)
1120 /* if no code remaining, no need to continue to use slow writes */
1122 invalidate_page_bitmap(p
);
1123 if (is_cpu_write_access
) {
1124 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1128 #ifdef TARGET_HAS_PRECISE_SMC
1129 if (current_tb_modified
) {
1130 /* we generate a block containing just the instruction
1131 modifying the memory. It will ensure that it cannot modify
1133 env
->current_tb
= NULL
;
1134 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1135 cpu_resume_from_signal(env
, NULL
);
1140 /* len must be <= 8 and start must be a multiple of len */
1141 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1147 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1148 cpu_single_env
->mem_io_vaddr
, len
,
1149 cpu_single_env
->eip
,
1150 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1153 p
= page_find(start
>> TARGET_PAGE_BITS
);
1156 if (p
->code_bitmap
) {
1157 offset
= start
& ~TARGET_PAGE_MASK
;
1158 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1159 if (b
& ((1 << len
) - 1))
1163 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1167 #if !defined(CONFIG_SOFTMMU)
1168 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1169 unsigned long pc
, void *puc
)
1171 TranslationBlock
*tb
;
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 TranslationBlock
*current_tb
= NULL
;
1176 CPUState
*env
= cpu_single_env
;
1177 int current_tb_modified
= 0;
1178 target_ulong current_pc
= 0;
1179 target_ulong current_cs_base
= 0;
1180 int current_flags
= 0;
1183 addr
&= TARGET_PAGE_MASK
;
1184 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1188 #ifdef TARGET_HAS_PRECISE_SMC
1189 if (tb
&& pc
!= 0) {
1190 current_tb
= tb_find_pc(pc
);
1193 while (tb
!= NULL
) {
1195 tb
= (TranslationBlock
*)((long)tb
& ~3);
1196 #ifdef TARGET_HAS_PRECISE_SMC
1197 if (current_tb
== tb
&&
1198 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1199 /* If we are modifying the current TB, we must stop
1200 its execution. We could be more precise by checking
1201 that the modification is after the current PC, but it
1202 would require a specialized function to partially
1203 restore the CPU state */
1205 current_tb_modified
= 1;
1206 cpu_restore_state(current_tb
, env
, pc
);
1207 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1210 #endif /* TARGET_HAS_PRECISE_SMC */
1211 tb_phys_invalidate(tb
, addr
);
1212 tb
= tb
->page_next
[n
];
1215 #ifdef TARGET_HAS_PRECISE_SMC
1216 if (current_tb_modified
) {
1217 /* we generate a block containing just the instruction
1218 modifying the memory. It will ensure that it cannot modify
1220 env
->current_tb
= NULL
;
1221 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1222 cpu_resume_from_signal(env
, puc
);
1228 /* add the tb in the target page and protect it if necessary */
1229 static inline void tb_alloc_page(TranslationBlock
*tb
,
1230 unsigned int n
, tb_page_addr_t page_addr
)
1233 #ifndef CONFIG_USER_ONLY
1234 bool page_already_protected
;
1237 tb
->page_addr
[n
] = page_addr
;
1238 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1239 tb
->page_next
[n
] = p
->first_tb
;
1240 #ifndef CONFIG_USER_ONLY
1241 page_already_protected
= p
->first_tb
!= NULL
;
1243 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1244 invalidate_page_bitmap(p
);
1246 #if defined(TARGET_HAS_SMC) || 1
1248 #if defined(CONFIG_USER_ONLY)
1249 if (p
->flags
& PAGE_WRITE
) {
1254 /* force the host page as non writable (writes will have a
1255 page fault + mprotect overhead) */
1256 page_addr
&= qemu_host_page_mask
;
1258 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1259 addr
+= TARGET_PAGE_SIZE
) {
1261 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1265 p2
->flags
&= ~PAGE_WRITE
;
1267 mprotect(g2h(page_addr
), qemu_host_page_size
,
1268 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1269 #ifdef DEBUG_TB_INVALIDATE
1270 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1275 /* if some code is already present, then the pages are already
1276 protected. So we handle the case where only the first TB is
1277 allocated in a physical page */
1278 if (!page_already_protected
) {
1279 tlb_protect_code(page_addr
);
1283 #endif /* TARGET_HAS_SMC */
1286 /* add a new TB and link it to the physical page tables. phys_page2 is
1287 (-1) to indicate that only one page contains the TB. */
1288 void tb_link_page(TranslationBlock
*tb
,
1289 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1292 TranslationBlock
**ptb
;
1294 /* Grab the mmap lock to stop another thread invalidating this TB
1295 before we are done. */
1297 /* add in the physical hash table */
1298 h
= tb_phys_hash_func(phys_pc
);
1299 ptb
= &tb_phys_hash
[h
];
1300 tb
->phys_hash_next
= *ptb
;
1303 /* add in the page list */
1304 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1305 if (phys_page2
!= -1)
1306 tb_alloc_page(tb
, 1, phys_page2
);
1308 tb
->page_addr
[1] = -1;
1310 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1311 tb
->jmp_next
[0] = NULL
;
1312 tb
->jmp_next
[1] = NULL
;
1314 /* init original jump addresses */
1315 if (tb
->tb_next_offset
[0] != 0xffff)
1316 tb_reset_jump(tb
, 0);
1317 if (tb
->tb_next_offset
[1] != 0xffff)
1318 tb_reset_jump(tb
, 1);
1320 #ifdef DEBUG_TB_CHECK
1326 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1327 tb[1].tc_ptr. Return NULL if not found */
1328 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1330 int m_min
, m_max
, m
;
1332 TranslationBlock
*tb
;
1336 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1337 tc_ptr
>= (unsigned long)code_gen_ptr
)
1339 /* binary search (cf Knuth) */
1342 while (m_min
<= m_max
) {
1343 m
= (m_min
+ m_max
) >> 1;
1345 v
= (unsigned long)tb
->tc_ptr
;
1348 else if (tc_ptr
< v
) {
1357 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1359 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1361 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1364 tb1
= tb
->jmp_next
[n
];
1366 /* find head of list */
1369 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1372 tb1
= tb1
->jmp_next
[n1
];
1374 /* we are now sure now that tb jumps to tb1 */
1377 /* remove tb from the jmp_first list */
1378 ptb
= &tb_next
->jmp_first
;
1382 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1383 if (n1
== n
&& tb1
== tb
)
1385 ptb
= &tb1
->jmp_next
[n1
];
1387 *ptb
= tb
->jmp_next
[n
];
1388 tb
->jmp_next
[n
] = NULL
;
1390 /* suppress the jump to next tb in generated code */
1391 tb_reset_jump(tb
, n
);
1393 /* suppress jumps in the tb on which we could have jumped */
1394 tb_reset_jump_recursive(tb_next
);
1398 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1400 tb_reset_jump_recursive2(tb
, 0);
1401 tb_reset_jump_recursive2(tb
, 1);
1404 #if defined(TARGET_HAS_ICE)
1405 #if defined(CONFIG_USER_ONLY)
1406 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1408 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1411 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1413 target_phys_addr_t addr
;
1415 ram_addr_t ram_addr
;
1418 addr
= cpu_get_phys_page_debug(env
, pc
);
1419 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1421 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1422 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1425 #endif /* TARGET_HAS_ICE */
1427 #if defined(CONFIG_USER_ONLY)
1428 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1433 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1434 int flags
, CPUWatchpoint
**watchpoint
)
1439 /* Add a watchpoint. */
1440 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1441 int flags
, CPUWatchpoint
**watchpoint
)
1443 target_ulong len_mask
= ~(len
- 1);
1446 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1447 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1448 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1449 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1452 wp
= g_malloc(sizeof(*wp
));
1455 wp
->len_mask
= len_mask
;
1458 /* keep all GDB-injected watchpoints in front */
1460 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1462 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1464 tlb_flush_page(env
, addr
);
1471 /* Remove a specific watchpoint. */
1472 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1475 target_ulong len_mask
= ~(len
- 1);
1478 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1479 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1480 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1481 cpu_watchpoint_remove_by_ref(env
, wp
);
1488 /* Remove a specific watchpoint by reference. */
1489 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1491 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1493 tlb_flush_page(env
, watchpoint
->vaddr
);
1498 /* Remove all matching watchpoints. */
1499 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1501 CPUWatchpoint
*wp
, *next
;
1503 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1504 if (wp
->flags
& mask
)
1505 cpu_watchpoint_remove_by_ref(env
, wp
);
1510 /* Add a breakpoint. */
1511 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1512 CPUBreakpoint
**breakpoint
)
1514 #if defined(TARGET_HAS_ICE)
1517 bp
= g_malloc(sizeof(*bp
));
1522 /* keep all GDB-injected breakpoints in front */
1524 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1526 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1528 breakpoint_invalidate(env
, pc
);
1538 /* Remove a specific breakpoint. */
1539 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1541 #if defined(TARGET_HAS_ICE)
1544 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1545 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1546 cpu_breakpoint_remove_by_ref(env
, bp
);
1556 /* Remove a specific breakpoint by reference. */
1557 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1559 #if defined(TARGET_HAS_ICE)
1560 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1562 breakpoint_invalidate(env
, breakpoint
->pc
);
1568 /* Remove all matching breakpoints. */
1569 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1571 #if defined(TARGET_HAS_ICE)
1572 CPUBreakpoint
*bp
, *next
;
1574 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1575 if (bp
->flags
& mask
)
1576 cpu_breakpoint_remove_by_ref(env
, bp
);
1581 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1582 CPU loop after each instruction */
1583 void cpu_single_step(CPUState
*env
, int enabled
)
1585 #if defined(TARGET_HAS_ICE)
1586 if (env
->singlestep_enabled
!= enabled
) {
1587 env
->singlestep_enabled
= enabled
;
1589 kvm_update_guest_debug(env
, 0);
1591 /* must flush all the translated code to avoid inconsistencies */
1592 /* XXX: only flush what is necessary */
1599 /* enable or disable low levels log */
1600 void cpu_set_log(int log_flags
)
1602 loglevel
= log_flags
;
1603 if (loglevel
&& !logfile
) {
1604 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1606 perror(logfilename
);
1609 #if !defined(CONFIG_SOFTMMU)
1610 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1612 static char logfile_buf
[4096];
1613 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1615 #elif defined(_WIN32)
1616 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1617 setvbuf(logfile
, NULL
, _IONBF
, 0);
1619 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1623 if (!loglevel
&& logfile
) {
1629 void cpu_set_log_filename(const char *filename
)
1631 logfilename
= strdup(filename
);
1636 cpu_set_log(loglevel
);
1639 static void cpu_unlink_tb(CPUState
*env
)
1641 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1642 problem and hope the cpu will stop of its own accord. For userspace
1643 emulation this often isn't actually as bad as it sounds. Often
1644 signals are used primarily to interrupt blocking syscalls. */
1645 TranslationBlock
*tb
;
1646 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1648 spin_lock(&interrupt_lock
);
1649 tb
= env
->current_tb
;
1650 /* if the cpu is currently executing code, we must unlink it and
1651 all the potentially executing TB */
1653 env
->current_tb
= NULL
;
1654 tb_reset_jump_recursive(tb
);
1656 spin_unlock(&interrupt_lock
);
1659 #ifndef CONFIG_USER_ONLY
1660 /* mask must never be zero, except for A20 change call */
1661 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1665 old_mask
= env
->interrupt_request
;
1666 env
->interrupt_request
|= mask
;
1669 * If called from iothread context, wake the target cpu in
1672 if (!qemu_cpu_is_self(env
)) {
1678 env
->icount_decr
.u16
.high
= 0xffff;
1680 && (mask
& ~old_mask
) != 0) {
1681 cpu_abort(env
, "Raised interrupt while not in I/O function");
1688 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1690 #else /* CONFIG_USER_ONLY */
1692 void cpu_interrupt(CPUState
*env
, int mask
)
1694 env
->interrupt_request
|= mask
;
1697 #endif /* CONFIG_USER_ONLY */
1699 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1701 env
->interrupt_request
&= ~mask
;
1704 void cpu_exit(CPUState
*env
)
1706 env
->exit_request
= 1;
1710 const CPULogItem cpu_log_items
[] = {
1711 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1712 "show generated host assembly code for each compiled TB" },
1713 { CPU_LOG_TB_IN_ASM
, "in_asm",
1714 "show target assembly code for each compiled TB" },
1715 { CPU_LOG_TB_OP
, "op",
1716 "show micro ops for each compiled TB" },
1717 { CPU_LOG_TB_OP_OPT
, "op_opt",
1720 "before eflags optimization and "
1722 "after liveness analysis" },
1723 { CPU_LOG_INT
, "int",
1724 "show interrupts/exceptions in short format" },
1725 { CPU_LOG_EXEC
, "exec",
1726 "show trace before each executed TB (lots of logs)" },
1727 { CPU_LOG_TB_CPU
, "cpu",
1728 "show CPU state before block translation" },
1730 { CPU_LOG_PCALL
, "pcall",
1731 "show protected mode far calls/returns/exceptions" },
1732 { CPU_LOG_RESET
, "cpu_reset",
1733 "show CPU state before CPU resets" },
1736 { CPU_LOG_IOPORT
, "ioport",
1737 "show all i/o ports accesses" },
1742 static int cmp1(const char *s1
, int n
, const char *s2
)
1744 if (strlen(s2
) != n
)
1746 return memcmp(s1
, s2
, n
) == 0;
1749 /* takes a comma separated list of log masks. Return 0 if error. */
1750 int cpu_str_to_log_mask(const char *str
)
1752 const CPULogItem
*item
;
1759 p1
= strchr(p
, ',');
1762 if(cmp1(p
,p1
-p
,"all")) {
1763 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1767 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1768 if (cmp1(p
, p1
- p
, item
->name
))
1782 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1789 fprintf(stderr
, "qemu: fatal: ");
1790 vfprintf(stderr
, fmt
, ap
);
1791 fprintf(stderr
, "\n");
1793 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1795 cpu_dump_state(env
, stderr
, fprintf
, 0);
1797 if (qemu_log_enabled()) {
1798 qemu_log("qemu: fatal: ");
1799 qemu_log_vprintf(fmt
, ap2
);
1802 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1804 log_cpu_state(env
, 0);
1811 #if defined(CONFIG_USER_ONLY)
1813 struct sigaction act
;
1814 sigfillset(&act
.sa_mask
);
1815 act
.sa_handler
= SIG_DFL
;
1816 sigaction(SIGABRT
, &act
, NULL
);
1822 CPUState
*cpu_copy(CPUState
*env
)
1824 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1825 CPUState
*next_cpu
= new_env
->next_cpu
;
1826 int cpu_index
= new_env
->cpu_index
;
1827 #if defined(TARGET_HAS_ICE)
1832 memcpy(new_env
, env
, sizeof(CPUState
));
1834 /* Preserve chaining and index. */
1835 new_env
->next_cpu
= next_cpu
;
1836 new_env
->cpu_index
= cpu_index
;
1838 /* Clone all break/watchpoints.
1839 Note: Once we support ptrace with hw-debug register access, make sure
1840 BP_CPU break/watchpoints are handled correctly on clone. */
1841 QTAILQ_INIT(&env
->breakpoints
);
1842 QTAILQ_INIT(&env
->watchpoints
);
1843 #if defined(TARGET_HAS_ICE)
1844 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1845 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1847 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1848 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1856 #if !defined(CONFIG_USER_ONLY)
1858 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1862 /* Discard jump cache entries for any tb which might potentially
1863 overlap the flushed page. */
1864 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1865 memset (&env
->tb_jmp_cache
[i
], 0,
1866 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1868 i
= tb_jmp_cache_hash_page(addr
);
1869 memset (&env
->tb_jmp_cache
[i
], 0,
1870 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1873 static CPUTLBEntry s_cputlb_empty_entry
= {
1880 /* NOTE: if flush_global is true, also flush global entries (not
1882 void tlb_flush(CPUState
*env
, int flush_global
)
1886 #if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env
->current_tb
= NULL
;
1893 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1895 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1896 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1900 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1902 env
->tlb_flush_addr
= -1;
1903 env
->tlb_flush_mask
= 0;
1907 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1909 if (addr
== (tlb_entry
->addr_read
&
1910 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1911 addr
== (tlb_entry
->addr_write
&
1912 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1913 addr
== (tlb_entry
->addr_code
&
1914 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1915 *tlb_entry
= s_cputlb_empty_entry
;
1919 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1924 #if defined(DEBUG_TLB)
1925 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1927 /* Check if we need to flush due to large pages. */
1928 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1929 #if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1932 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env
->current_tb
= NULL
;
1941 addr
&= TARGET_PAGE_MASK
;
1942 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1943 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1944 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1946 tlb_flush_jmp_cache(env
, addr
);
1949 /* update the TLBs so that writes to code in the virtual page 'addr'
1951 static void tlb_protect_code(ram_addr_t ram_addr
)
1953 cpu_physical_memory_reset_dirty(ram_addr
,
1954 ram_addr
+ TARGET_PAGE_SIZE
,
1958 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1959 tested for self modifying code */
1960 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1963 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
1966 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1967 unsigned long start
, unsigned long length
)
1970 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
1971 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1972 if ((addr
- start
) < length
) {
1973 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1978 /* Note: start and end must be within the same ram block. */
1979 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1983 unsigned long length
, start1
;
1986 start
&= TARGET_PAGE_MASK
;
1987 end
= TARGET_PAGE_ALIGN(end
);
1989 length
= end
- start
;
1992 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
1996 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
1997 /* Check that we don't span multiple blocks - this breaks the
1998 address comparisons below. */
1999 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2000 != (end
- 1) - start
) {
2004 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2006 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2007 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2008 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2014 int cpu_physical_memory_set_dirty_tracking(int enable
)
2017 in_migration
= enable
;
2021 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2023 ram_addr_t ram_addr
;
2026 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
2027 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2028 + tlb_entry
->addend
);
2029 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2030 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2031 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2036 /* update the TLB according to the current state of the dirty bits */
2037 void cpu_tlb_update_dirty(CPUState
*env
)
2041 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2042 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2043 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2047 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2049 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2050 tlb_entry
->addr_write
= vaddr
;
2053 /* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2060 vaddr
&= TARGET_PAGE_MASK
;
2061 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2062 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2063 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2066 /* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
2068 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2071 target_ulong mask
= ~(size
- 1);
2073 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2074 env
->tlb_flush_addr
= vaddr
& mask
;
2075 env
->tlb_flush_mask
= mask
;
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable size TLB. */
2081 mask
&= env
->tlb_flush_mask
;
2082 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2085 env
->tlb_flush_addr
&= mask
;
2086 env
->tlb_flush_mask
= mask
;
2089 static bool is_ram_rom(ram_addr_t pd
)
2091 pd
&= ~TARGET_PAGE_MASK
;
2092 return pd
== io_mem_ram
.ram_addr
|| pd
== io_mem_rom
.ram_addr
;
2095 static bool is_ram_rom_romd(ram_addr_t pd
)
2097 return is_ram_rom(pd
) || (pd
& IO_MEM_ROMD
);
2100 /* Add a new TLB entry. At most one entry for a given virtual address
2101 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2102 supplied size is only used by tlb_flush_page. */
2103 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2104 target_phys_addr_t paddr
, int prot
,
2105 int mmu_idx
, target_ulong size
)
2110 target_ulong address
;
2111 target_ulong code_address
;
2112 unsigned long addend
;
2115 target_phys_addr_t iotlb
;
2117 assert(size
>= TARGET_PAGE_SIZE
);
2118 if (size
!= TARGET_PAGE_SIZE
) {
2119 tlb_add_large_page(env
, vaddr
, size
);
2121 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2123 #if defined(DEBUG_TLB)
2124 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2125 " prot=%x idx=%d pd=0x%08lx\n",
2126 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2130 if (!is_ram_rom_romd(pd
)) {
2131 /* IO memory case (romd handled later) */
2132 address
|= TLB_MMIO
;
2134 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2135 if (is_ram_rom(pd
)) {
2137 iotlb
= pd
& TARGET_PAGE_MASK
;
2138 if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
)
2139 iotlb
|= io_mem_notdirty
.ram_addr
;
2141 iotlb
|= io_mem_rom
.ram_addr
;
2143 /* IO handlers are currently passed a physical address.
2144 It would be nice to pass an offset from the base address
2145 of that region. This would avoid having to special case RAM,
2146 and avoid full address decoding in every device.
2147 We can't use the high bits of pd for this because
2148 IO_MEM_ROMD uses these as a ram address. */
2149 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2150 iotlb
+= p
.region_offset
;
2153 code_address
= address
;
2154 /* Make accesses to pages with watchpoints go via the
2155 watchpoint trap routines. */
2156 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2157 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2158 /* Avoid trapping reads of pages with a write breakpoint. */
2159 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2160 iotlb
= io_mem_watch
+ paddr
;
2161 address
|= TLB_MMIO
;
2167 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2168 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2169 te
= &env
->tlb_table
[mmu_idx
][index
];
2170 te
->addend
= addend
- vaddr
;
2171 if (prot
& PAGE_READ
) {
2172 te
->addr_read
= address
;
2177 if (prot
& PAGE_EXEC
) {
2178 te
->addr_code
= code_address
;
2182 if (prot
& PAGE_WRITE
) {
2183 if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_rom
.ram_addr
||
2184 (pd
& IO_MEM_ROMD
)) {
2185 /* Write access calls the I/O callback. */
2186 te
->addr_write
= address
| TLB_MMIO
;
2187 } else if ((pd
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
&&
2188 !cpu_physical_memory_is_dirty(pd
)) {
2189 te
->addr_write
= address
| TLB_NOTDIRTY
;
2191 te
->addr_write
= address
;
2194 te
->addr_write
= -1;
2200 void tlb_flush(CPUState
*env
, int flush_global
)
2204 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2209 * Walks guest process memory "regions" one by one
2210 * and calls callback function 'fn' for each region.
2213 struct walk_memory_regions_data
2215 walk_memory_regions_fn fn
;
2217 unsigned long start
;
2221 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2222 abi_ulong end
, int new_prot
)
2224 if (data
->start
!= -1ul) {
2225 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2231 data
->start
= (new_prot
? end
: -1ul);
2232 data
->prot
= new_prot
;
2237 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2238 abi_ulong base
, int level
, void **lp
)
2244 return walk_memory_regions_end(data
, base
, 0);
2249 for (i
= 0; i
< L2_SIZE
; ++i
) {
2250 int prot
= pd
[i
].flags
;
2252 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2253 if (prot
!= data
->prot
) {
2254 rc
= walk_memory_regions_end(data
, pa
, prot
);
2262 for (i
= 0; i
< L2_SIZE
; ++i
) {
2263 pa
= base
| ((abi_ulong
)i
<<
2264 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2265 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2275 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2277 struct walk_memory_regions_data data
;
2285 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2286 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2287 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2293 return walk_memory_regions_end(&data
, 0, 0);
2296 static int dump_region(void *priv
, abi_ulong start
,
2297 abi_ulong end
, unsigned long prot
)
2299 FILE *f
= (FILE *)priv
;
2301 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2302 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2303 start
, end
, end
- start
,
2304 ((prot
& PAGE_READ
) ? 'r' : '-'),
2305 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2306 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2311 /* dump memory mappings */
2312 void page_dump(FILE *f
)
2314 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2315 "start", "end", "size", "prot");
2316 walk_memory_regions(f
, dump_region
);
2319 int page_get_flags(target_ulong address
)
2323 p
= page_find(address
>> TARGET_PAGE_BITS
);
2329 /* Modify the flags of a page and invalidate the code if necessary.
2330 The flag PAGE_WRITE_ORG is positioned automatically depending
2331 on PAGE_WRITE. The mmap_lock should already be held. */
2332 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2334 target_ulong addr
, len
;
2336 /* This function should never be called with addresses outside the
2337 guest address space. If this assert fires, it probably indicates
2338 a missing call to h2g_valid. */
2339 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2340 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2342 assert(start
< end
);
2344 start
= start
& TARGET_PAGE_MASK
;
2345 end
= TARGET_PAGE_ALIGN(end
);
2347 if (flags
& PAGE_WRITE
) {
2348 flags
|= PAGE_WRITE_ORG
;
2351 for (addr
= start
, len
= end
- start
;
2353 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2354 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2356 /* If the write protection bit is set, then we invalidate
2358 if (!(p
->flags
& PAGE_WRITE
) &&
2359 (flags
& PAGE_WRITE
) &&
2361 tb_invalidate_phys_page(addr
, 0, NULL
);
2367 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2373 /* This function should never be called with addresses outside the
2374 guest address space. If this assert fires, it probably indicates
2375 a missing call to h2g_valid. */
2376 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2377 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2383 if (start
+ len
- 1 < start
) {
2384 /* We've wrapped around. */
2388 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2389 start
= start
& TARGET_PAGE_MASK
;
2391 for (addr
= start
, len
= end
- start
;
2393 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2394 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2397 if( !(p
->flags
& PAGE_VALID
) )
2400 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2402 if (flags
& PAGE_WRITE
) {
2403 if (!(p
->flags
& PAGE_WRITE_ORG
))
2405 /* unprotect the page if it was put read-only because it
2406 contains translated code */
2407 if (!(p
->flags
& PAGE_WRITE
)) {
2408 if (!page_unprotect(addr
, 0, NULL
))
2417 /* called from signal handler: invalidate the code and unprotect the
2418 page. Return TRUE if the fault was successfully handled. */
2419 int page_unprotect(target_ulong address
, unsigned long pc
, void *puc
)
2423 target_ulong host_start
, host_end
, addr
;
2425 /* Technically this isn't safe inside a signal handler. However we
2426 know this only ever happens in a synchronous SEGV handler, so in
2427 practice it seems to be ok. */
2430 p
= page_find(address
>> TARGET_PAGE_BITS
);
2436 /* if the page was really writable, then we change its
2437 protection back to writable */
2438 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2439 host_start
= address
& qemu_host_page_mask
;
2440 host_end
= host_start
+ qemu_host_page_size
;
2443 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2444 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2445 p
->flags
|= PAGE_WRITE
;
2448 /* and since the content will be modified, we must invalidate
2449 the corresponding translated code. */
2450 tb_invalidate_phys_page(addr
, pc
, puc
);
2451 #ifdef DEBUG_TB_CHECK
2452 tb_invalidate_check(addr
);
2455 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2465 static inline void tlb_set_dirty(CPUState
*env
,
2466 unsigned long addr
, target_ulong vaddr
)
2469 #endif /* defined(CONFIG_USER_ONLY) */
2471 #if !defined(CONFIG_USER_ONLY)
2473 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2474 typedef struct subpage_t
{
2475 target_phys_addr_t base
;
2476 ram_addr_t sub_io_index
[TARGET_PAGE_SIZE
];
2477 ram_addr_t region_offset
[TARGET_PAGE_SIZE
];
2480 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2481 ram_addr_t memory
, ram_addr_t region_offset
);
2482 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
2483 ram_addr_t orig_memory
,
2484 ram_addr_t region_offset
);
2485 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2488 if (addr > start_addr) \
2491 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2492 if (start_addr2 > 0) \
2496 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2497 end_addr2 = TARGET_PAGE_SIZE - 1; \
2499 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2500 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2505 /* register physical memory.
2506 For RAM, 'size' must be a multiple of the target page size.
2507 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2508 io memory page. The address used when calling the IO function is
2509 the offset from the start of the region, plus region_offset. Both
2510 start_addr and region_offset are rounded down to a page boundary
2511 before calculating this offset. This should not be a problem unless
2512 the low bits of start_addr and region_offset differ. */
2513 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2514 bool readable
, bool readonly
)
2516 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2517 ram_addr_t size
= section
->size
;
2518 ram_addr_t phys_offset
= section
->mr
->ram_addr
;
2519 ram_addr_t region_offset
= section
->offset_within_region
;
2520 target_phys_addr_t addr
, end_addr
;
2523 ram_addr_t orig_size
= size
;
2526 if (memory_region_is_ram(section
->mr
)) {
2527 phys_offset
+= region_offset
;
2532 phys_offset
&= ~TARGET_PAGE_MASK
& ~IO_MEM_ROMD
;
2536 phys_offset
|= io_mem_rom
.ram_addr
;
2541 if (phys_offset
== io_mem_unassigned
.ram_addr
) {
2542 region_offset
= start_addr
;
2544 region_offset
&= TARGET_PAGE_MASK
;
2545 size
= (size
+ TARGET_PAGE_SIZE
- 1) & TARGET_PAGE_MASK
;
2546 end_addr
= start_addr
+ (target_phys_addr_t
)size
;
2550 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 0);
2551 if (p
&& p
->phys_offset
!= io_mem_unassigned
.ram_addr
) {
2552 ram_addr_t orig_memory
= p
->phys_offset
;
2553 target_phys_addr_t start_addr2
, end_addr2
;
2554 int need_subpage
= 0;
2556 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
, end_addr2
,
2559 if (!(orig_memory
& IO_MEM_SUBPAGE
)) {
2560 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2561 &p
->phys_offset
, orig_memory
,
2564 subpage
= io_mem_opaque
[(orig_memory
& ~TARGET_PAGE_MASK
)
2567 subpage_register(subpage
, start_addr2
, end_addr2
, phys_offset
,
2569 p
->region_offset
= 0;
2571 p
->phys_offset
= phys_offset
;
2572 p
->region_offset
= region_offset
;
2573 if (is_ram_rom_romd(phys_offset
))
2574 phys_offset
+= TARGET_PAGE_SIZE
;
2577 p
= phys_page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2578 p
->phys_offset
= phys_offset
;
2579 p
->region_offset
= region_offset
;
2580 if (is_ram_rom_romd(phys_offset
)) {
2581 phys_offset
+= TARGET_PAGE_SIZE
;
2583 target_phys_addr_t start_addr2
, end_addr2
;
2584 int need_subpage
= 0;
2586 CHECK_SUBPAGE(addr
, start_addr
, start_addr2
, end_addr
,
2587 end_addr2
, need_subpage
);
2590 subpage
= subpage_init((addr
& TARGET_PAGE_MASK
),
2592 io_mem_unassigned
.ram_addr
,
2593 addr
& TARGET_PAGE_MASK
);
2594 subpage_register(subpage
, start_addr2
, end_addr2
,
2595 phys_offset
, region_offset
);
2596 p
->region_offset
= 0;
2600 region_offset
+= TARGET_PAGE_SIZE
;
2601 addr
+= TARGET_PAGE_SIZE
;
2602 } while (addr
!= end_addr
);
2604 /* since each CPU stores ram addresses in its TLB cache, we must
2605 reset the modified entries */
2607 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2612 void qemu_register_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2615 kvm_coalesce_mmio_region(addr
, size
);
2618 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr
, ram_addr_t size
)
2621 kvm_uncoalesce_mmio_region(addr
, size
);
2624 void qemu_flush_coalesced_mmio_buffer(void)
2627 kvm_flush_coalesced_mmio_buffer();
2630 #if defined(__linux__) && !defined(TARGET_S390X)
2632 #include <sys/vfs.h>
2634 #define HUGETLBFS_MAGIC 0x958458f6
2636 static long gethugepagesize(const char *path
)
2642 ret
= statfs(path
, &fs
);
2643 } while (ret
!= 0 && errno
== EINTR
);
2650 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2651 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2656 static void *file_ram_alloc(RAMBlock
*block
,
2666 unsigned long hpagesize
;
2668 hpagesize
= gethugepagesize(path
);
2673 if (memory
< hpagesize
) {
2677 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2678 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2682 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2686 fd
= mkstemp(filename
);
2688 perror("unable to create backing store for hugepages");
2695 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2698 * ftruncate is not supported by hugetlbfs in older
2699 * hosts, so don't bother bailing out on errors.
2700 * If anything goes wrong with it under other filesystems,
2703 if (ftruncate(fd
, memory
))
2704 perror("ftruncate");
2707 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2708 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2709 * to sidestep this quirk.
2711 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2712 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2714 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2716 if (area
== MAP_FAILED
) {
2717 perror("file_ram_alloc: can't mmap RAM pages");
2726 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2728 RAMBlock
*block
, *next_block
;
2729 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2731 if (QLIST_EMPTY(&ram_list
.blocks
))
2734 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2735 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2737 end
= block
->offset
+ block
->length
;
2739 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2740 if (next_block
->offset
>= end
) {
2741 next
= MIN(next
, next_block
->offset
);
2744 if (next
- end
>= size
&& next
- end
< mingap
) {
2746 mingap
= next
- end
;
2750 if (offset
== RAM_ADDR_MAX
) {
2751 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2759 static ram_addr_t
last_ram_offset(void)
2762 ram_addr_t last
= 0;
2764 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2765 last
= MAX(last
, block
->offset
+ block
->length
);
2770 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2772 RAMBlock
*new_block
, *block
;
2775 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2776 if (block
->offset
== addr
) {
2782 assert(!new_block
->idstr
[0]);
2784 if (dev
&& dev
->parent_bus
&& dev
->parent_bus
->info
->get_dev_path
) {
2785 char *id
= dev
->parent_bus
->info
->get_dev_path(dev
);
2787 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2791 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2793 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2794 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2795 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2802 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2805 RAMBlock
*new_block
;
2807 size
= TARGET_PAGE_ALIGN(size
);
2808 new_block
= g_malloc0(sizeof(*new_block
));
2811 new_block
->offset
= find_ram_offset(size
);
2813 new_block
->host
= host
;
2814 new_block
->flags
|= RAM_PREALLOC_MASK
;
2817 #if defined (__linux__) && !defined(TARGET_S390X)
2818 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2819 if (!new_block
->host
) {
2820 new_block
->host
= qemu_vmalloc(size
);
2821 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2824 fprintf(stderr
, "-mem-path option unsupported\n");
2828 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2829 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2830 an system defined value, which is at least 256GB. Larger systems
2831 have larger values. We put the guest between the end of data
2832 segment (system break) and this value. We use 32GB as a base to
2833 have enough room for the system break to grow. */
2834 new_block
->host
= mmap((void*)0x800000000, size
,
2835 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2836 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2837 if (new_block
->host
== MAP_FAILED
) {
2838 fprintf(stderr
, "Allocating RAM failed\n");
2842 if (xen_enabled()) {
2843 xen_ram_alloc(new_block
->offset
, size
, mr
);
2845 new_block
->host
= qemu_vmalloc(size
);
2848 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2851 new_block
->length
= size
;
2853 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2855 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2856 last_ram_offset() >> TARGET_PAGE_BITS
);
2857 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2858 0xff, size
>> TARGET_PAGE_BITS
);
2861 kvm_setup_guest_memory(new_block
->host
, size
);
2863 return new_block
->offset
;
2866 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2868 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2871 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2875 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2876 if (addr
== block
->offset
) {
2877 QLIST_REMOVE(block
, next
);
2884 void qemu_ram_free(ram_addr_t addr
)
2888 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2889 if (addr
== block
->offset
) {
2890 QLIST_REMOVE(block
, next
);
2891 if (block
->flags
& RAM_PREALLOC_MASK
) {
2893 } else if (mem_path
) {
2894 #if defined (__linux__) && !defined(TARGET_S390X)
2896 munmap(block
->host
, block
->length
);
2899 qemu_vfree(block
->host
);
2905 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2906 munmap(block
->host
, block
->length
);
2908 if (xen_enabled()) {
2909 xen_invalidate_map_cache_entry(block
->host
);
2911 qemu_vfree(block
->host
);
2923 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2930 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2931 offset
= addr
- block
->offset
;
2932 if (offset
< block
->length
) {
2933 vaddr
= block
->host
+ offset
;
2934 if (block
->flags
& RAM_PREALLOC_MASK
) {
2938 munmap(vaddr
, length
);
2940 #if defined(__linux__) && !defined(TARGET_S390X)
2943 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2946 flags
|= MAP_PRIVATE
;
2948 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2949 flags
, block
->fd
, offset
);
2951 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2952 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2959 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2960 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
2961 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2964 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2965 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2969 if (area
!= vaddr
) {
2970 fprintf(stderr
, "Could not remap addr: "
2971 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
2975 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
2981 #endif /* !_WIN32 */
2983 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2984 With the exception of the softmmu code in this file, this should
2985 only be used for local memory (e.g. video ram) that the device owns,
2986 and knows it isn't going to access beyond the end of the block.
2988 It should not be used for general purpose DMA.
2989 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2991 void *qemu_get_ram_ptr(ram_addr_t addr
)
2995 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2996 if (addr
- block
->offset
< block
->length
) {
2997 /* Move this entry to to start of the list. */
2998 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
2999 QLIST_REMOVE(block
, next
);
3000 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
3002 if (xen_enabled()) {
3003 /* We need to check if the requested address is in the RAM
3004 * because we don't want to map the entire memory in QEMU.
3005 * In that case just map until the end of the page.
3007 if (block
->offset
== 0) {
3008 return xen_map_cache(addr
, 0, 0);
3009 } else if (block
->host
== NULL
) {
3011 xen_map_cache(block
->offset
, block
->length
, 1);
3014 return block
->host
+ (addr
- block
->offset
);
3018 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3024 /* Return a host pointer to ram allocated with qemu_ram_alloc.
3025 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3027 void *qemu_safe_ram_ptr(ram_addr_t addr
)
3031 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3032 if (addr
- block
->offset
< block
->length
) {
3033 if (xen_enabled()) {
3034 /* We need to check if the requested address is in the RAM
3035 * because we don't want to map the entire memory in QEMU.
3036 * In that case just map until the end of the page.
3038 if (block
->offset
== 0) {
3039 return xen_map_cache(addr
, 0, 0);
3040 } else if (block
->host
== NULL
) {
3042 xen_map_cache(block
->offset
, block
->length
, 1);
3045 return block
->host
+ (addr
- block
->offset
);
3049 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
3055 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3056 * but takes a size argument */
3057 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
3062 if (xen_enabled()) {
3063 return xen_map_cache(addr
, *size
, 1);
3067 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3068 if (addr
- block
->offset
< block
->length
) {
3069 if (addr
- block
->offset
+ *size
> block
->length
)
3070 *size
= block
->length
- addr
+ block
->offset
;
3071 return block
->host
+ (addr
- block
->offset
);
3075 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
/* Release a pointer obtained from qemu_get_ram_ptr(); currently only
 * emits a trace event (no refcounting visible here). */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
3085 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
3088 uint8_t *host
= ptr
;
3090 if (xen_enabled()) {
3091 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
3095 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3096 /* This case append when the block is not mapped. */
3097 if (block
->host
== NULL
) {
3100 if (host
- block
->host
< block
->length
) {
3101 *ram_addr
= block
->offset
+ (host
- block
->host
);
3109 /* Some of the softmmu routines need to translate from a host pointer
3110 (typically a TLB entry) back to a ram offset. */
3111 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
3113 ram_addr_t ram_addr
;
3115 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
3116 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
3122 static uint64_t unassigned_mem_read(void *opaque
, target_phys_addr_t addr
,
3125 #ifdef DEBUG_UNASSIGNED
3126 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3128 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3129 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
3134 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
3135 uint64_t val
, unsigned size
)
3137 #ifdef DEBUG_UNASSIGNED
3138 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
3140 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3141 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
3145 static const MemoryRegionOps unassigned_mem_ops
= {
3146 .read
= unassigned_mem_read
,
3147 .write
= unassigned_mem_write
,
3148 .endianness
= DEVICE_NATIVE_ENDIAN
,
3151 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
3157 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
3158 uint64_t value
, unsigned size
)
3163 static const MemoryRegionOps error_mem_ops
= {
3164 .read
= error_mem_read
,
3165 .write
= error_mem_write
,
3166 .endianness
= DEVICE_NATIVE_ENDIAN
,
3169 static const MemoryRegionOps rom_mem_ops
= {
3170 .read
= error_mem_read
,
3171 .write
= unassigned_mem_write
,
3172 .endianness
= DEVICE_NATIVE_ENDIAN
,
3175 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
3176 uint64_t val
, unsigned size
)
3179 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3180 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3181 #if !defined(CONFIG_USER_ONLY)
3182 tb_invalidate_phys_page_fast(ram_addr
, size
);
3183 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3188 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3191 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3194 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3199 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3200 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3201 /* we remove the notdirty callback only if the code has been
3203 if (dirty_flags
== 0xff)
3204 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3207 static const MemoryRegionOps notdirty_mem_ops
= {
3208 .read
= error_mem_read
,
3209 .write
= notdirty_mem_write
,
3210 .endianness
= DEVICE_NATIVE_ENDIAN
,
3213 /* Generate a debug exception if a watchpoint has been hit. */
3214 static void check_watchpoint(int offset
, int len_mask
, int flags
)
3216 CPUState
*env
= cpu_single_env
;
3217 target_ulong pc
, cs_base
;
3218 TranslationBlock
*tb
;
3223 if (env
->watchpoint_hit
) {
3224 /* We re-entered the check after replacing the TB. Now raise
3225 * the debug interrupt so that is will trigger after the
3226 * current instruction. */
3227 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
3230 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
3231 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
3232 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
3233 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
3234 wp
->flags
|= BP_WATCHPOINT_HIT
;
3235 if (!env
->watchpoint_hit
) {
3236 env
->watchpoint_hit
= wp
;
3237 tb
= tb_find_pc(env
->mem_io_pc
);
3239 cpu_abort(env
, "check_watchpoint: could not find TB for "
3240 "pc=%p", (void *)env
->mem_io_pc
);
3242 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
3243 tb_phys_invalidate(tb
, -1);
3244 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
3245 env
->exception_index
= EXCP_DEBUG
;
3247 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
3248 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
3250 cpu_resume_from_signal(env
, NULL
);
3253 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
3258 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3259 so these check for a hit then pass through to the normal out-of-line
3261 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
3263 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
3264 return ldub_phys(addr
);
3267 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
3269 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
3270 return lduw_phys(addr
);
3273 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
3275 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
3276 return ldl_phys(addr
);
3279 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3282 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
3283 stb_phys(addr
, val
);
3286 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
3289 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
3290 stw_phys(addr
, val
);
3293 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
3296 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
3297 stl_phys(addr
, val
);
3300 static CPUReadMemoryFunc
* const watch_mem_read
[3] = {
3306 static CPUWriteMemoryFunc
* const watch_mem_write
[3] = {
3312 static inline uint32_t subpage_readlen (subpage_t
*mmio
,
3313 target_phys_addr_t addr
,
3316 unsigned int idx
= SUBPAGE_IDX(addr
);
3317 #if defined(DEBUG_SUBPAGE)
3318 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3319 mmio
, len
, addr
, idx
);
3322 addr
+= mmio
->region_offset
[idx
];
3323 idx
= mmio
->sub_io_index
[idx
];
3324 return io_mem_read(idx
, addr
, 1 <<len
);
3327 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
3328 uint32_t value
, unsigned int len
)
3330 unsigned int idx
= SUBPAGE_IDX(addr
);
3331 #if defined(DEBUG_SUBPAGE)
3332 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n",
3333 __func__
, mmio
, len
, addr
, idx
, value
);
3336 addr
+= mmio
->region_offset
[idx
];
3337 idx
= mmio
->sub_io_index
[idx
];
3338 io_mem_write(idx
, addr
, value
, 1 << len
);
3341 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
3343 return subpage_readlen(opaque
, addr
, 0);
3346 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
3349 subpage_writelen(opaque
, addr
, value
, 0);
3352 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
3354 return subpage_readlen(opaque
, addr
, 1);
3357 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
3360 subpage_writelen(opaque
, addr
, value
, 1);
3363 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
3365 return subpage_readlen(opaque
, addr
, 2);
3368 static void subpage_writel (void *opaque
, target_phys_addr_t addr
,
3371 subpage_writelen(opaque
, addr
, value
, 2);
3374 static CPUReadMemoryFunc
* const subpage_read
[] = {
3380 static CPUWriteMemoryFunc
* const subpage_write
[] = {
3386 static uint32_t subpage_ram_readb(void *opaque
, target_phys_addr_t addr
)
3388 ram_addr_t raddr
= addr
;
3389 void *ptr
= qemu_get_ram_ptr(raddr
);
3393 static void subpage_ram_writeb(void *opaque
, target_phys_addr_t addr
,
3396 ram_addr_t raddr
= addr
;
3397 void *ptr
= qemu_get_ram_ptr(raddr
);
3401 static uint32_t subpage_ram_readw(void *opaque
, target_phys_addr_t addr
)
3403 ram_addr_t raddr
= addr
;
3404 void *ptr
= qemu_get_ram_ptr(raddr
);
3408 static void subpage_ram_writew(void *opaque
, target_phys_addr_t addr
,
3411 ram_addr_t raddr
= addr
;
3412 void *ptr
= qemu_get_ram_ptr(raddr
);
3416 static uint32_t subpage_ram_readl(void *opaque
, target_phys_addr_t addr
)
3418 ram_addr_t raddr
= addr
;
3419 void *ptr
= qemu_get_ram_ptr(raddr
);
3423 static void subpage_ram_writel(void *opaque
, target_phys_addr_t addr
,
3426 ram_addr_t raddr
= addr
;
3427 void *ptr
= qemu_get_ram_ptr(raddr
);
3431 static CPUReadMemoryFunc
* const subpage_ram_read
[] = {
3437 static CPUWriteMemoryFunc
* const subpage_ram_write
[] = {
3438 &subpage_ram_writeb
,
3439 &subpage_ram_writew
,
3440 &subpage_ram_writel
,
3443 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3444 ram_addr_t memory
, ram_addr_t region_offset
)
3448 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3450 idx
= SUBPAGE_IDX(start
);
3451 eidx
= SUBPAGE_IDX(end
);
3452 #if defined(DEBUG_SUBPAGE)
3453 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3454 mmio
, start
, end
, idx
, eidx
, memory
);
3456 if ((memory
& ~TARGET_PAGE_MASK
) == io_mem_ram
.ram_addr
) {
3457 memory
= IO_MEM_SUBPAGE_RAM
;
3459 memory
= (memory
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3460 for (; idx
<= eidx
; idx
++) {
3461 mmio
->sub_io_index
[idx
] = memory
;
3462 mmio
->region_offset
[idx
] = region_offset
;
3468 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3469 ram_addr_t orig_memory
,
3470 ram_addr_t region_offset
)
3475 mmio
= g_malloc0(sizeof(subpage_t
));
3478 subpage_memory
= cpu_register_io_memory(subpage_read
, subpage_write
, mmio
);
3479 #if defined(DEBUG_SUBPAGE)
3480 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3481 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3483 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3484 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
3489 static int get_free_io_mem_idx(void)
3493 for (i
= 0; i
<IO_MEM_NB_ENTRIES
; i
++)
3494 if (!io_mem_used
[i
]) {
3498 fprintf(stderr
, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES
);
3502 /* mem_read and mem_write are arrays of functions containing the
3503 function to access byte (index 0), word (index 1) and dword (index
3504 2). Functions can be omitted with a NULL function pointer.
3505 If io_index is non zero, the corresponding io zone is
3506 modified. If it is zero, a new io zone is allocated. The return
3507 value can be used with cpu_register_physical_memory(). (-1) is
3508 returned if error. */
3509 static int cpu_register_io_memory_fixed(int io_index
,
3510 CPUReadMemoryFunc
* const *mem_read
,
3511 CPUWriteMemoryFunc
* const *mem_write
,
3516 if (io_index
<= 0) {
3517 io_index
= get_free_io_mem_idx();
3521 io_index
>>= IO_MEM_SHIFT
;
3522 if (io_index
>= IO_MEM_NB_ENTRIES
)
3526 for (i
= 0; i
< 3; ++i
) {
3527 assert(mem_read
[i
]);
3528 _io_mem_read
[io_index
][i
] = mem_read
[i
];
3530 for (i
= 0; i
< 3; ++i
) {
3531 assert(mem_write
[i
]);
3532 _io_mem_write
[io_index
][i
] = mem_write
[i
];
3534 io_mem_opaque
[io_index
] = opaque
;
3536 return (io_index
<< IO_MEM_SHIFT
);
3539 int cpu_register_io_memory(CPUReadMemoryFunc
* const *mem_read
,
3540 CPUWriteMemoryFunc
* const *mem_write
,
3543 return cpu_register_io_memory_fixed(0, mem_read
, mem_write
, opaque
);
3546 void cpu_unregister_io_memory(int io_table_address
)
3549 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
3551 for (i
=0;i
< 3; i
++) {
3552 _io_mem_read
[io_index
][i
] = NULL
;
3553 _io_mem_write
[io_index
][i
] = NULL
;
3555 io_mem_opaque
[io_index
] = NULL
;
3556 io_mem_used
[io_index
] = 0;
3559 static void io_mem_init(void)
3563 /* Must be first: */
3564 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3565 assert(io_mem_ram
.ram_addr
== 0);
3566 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3567 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3568 "unassigned", UINT64_MAX
);
3569 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3570 "notdirty", UINT64_MAX
);
3571 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM
, subpage_ram_read
,
3572 subpage_ram_write
, NULL
);
3576 io_mem_watch
= cpu_register_io_memory(watch_mem_read
,
3577 watch_mem_write
, NULL
);
3580 static void memory_map_init(void)
3582 system_memory
= g_malloc(sizeof(*system_memory
));
3583 memory_region_init(system_memory
, "system", INT64_MAX
);
3584 set_system_memory_map(system_memory
);
3586 system_io
= g_malloc(sizeof(*system_io
));
3587 memory_region_init(system_io
, "io", 65536);
3588 set_system_io_map(system_io
);
3591 MemoryRegion
*get_system_memory(void)
3593 return system_memory
;
3596 MemoryRegion
*get_system_io(void)
3601 #endif /* !defined(CONFIG_USER_ONLY) */
3603 /* physical memory access (slow version, mainly for debug) */
3604 #if defined(CONFIG_USER_ONLY)
3605 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3606 uint8_t *buf
, int len
, int is_write
)
3613 page
= addr
& TARGET_PAGE_MASK
;
3614 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3617 flags
= page_get_flags(page
);
3618 if (!(flags
& PAGE_VALID
))
3621 if (!(flags
& PAGE_WRITE
))
3623 /* XXX: this code should not depend on lock_user */
3624 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3627 unlock_user(p
, addr
, l
);
3629 if (!(flags
& PAGE_READ
))
3631 /* XXX: this code should not depend on lock_user */
3632 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3635 unlock_user(p
, addr
, 0);
3645 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3646 int len
, int is_write
)
3651 target_phys_addr_t page
;
3656 page
= addr
& TARGET_PAGE_MASK
;
3657 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3660 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3664 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3665 target_phys_addr_t addr1
;
3666 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3667 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3668 /* XXX: could force cpu_single_env to NULL to avoid
3670 if (l
>= 4 && ((addr1
& 3) == 0)) {
3671 /* 32 bit write access */
3673 io_mem_write(io_index
, addr1
, val
, 4);
3675 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3676 /* 16 bit write access */
3678 io_mem_write(io_index
, addr1
, val
, 2);
3681 /* 8 bit write access */
3683 io_mem_write(io_index
, addr1
, val
, 1);
3688 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3690 ptr
= qemu_get_ram_ptr(addr1
);
3691 memcpy(ptr
, buf
, l
);
3692 if (!cpu_physical_memory_is_dirty(addr1
)) {
3693 /* invalidate code */
3694 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3696 cpu_physical_memory_set_dirty_flags(
3697 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3699 qemu_put_ram_ptr(ptr
);
3702 if (!is_ram_rom_romd(pd
)) {
3703 target_phys_addr_t addr1
;
3705 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3706 addr1
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3707 if (l
>= 4 && ((addr1
& 3) == 0)) {
3708 /* 32 bit read access */
3709 val
= io_mem_read(io_index
, addr1
, 4);
3712 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3713 /* 16 bit read access */
3714 val
= io_mem_read(io_index
, addr1
, 2);
3718 /* 8 bit read access */
3719 val
= io_mem_read(io_index
, addr1
, 1);
3725 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
3726 memcpy(buf
, ptr
+ (addr
& ~TARGET_PAGE_MASK
), l
);
3727 qemu_put_ram_ptr(ptr
);
3736 /* used for ROM loading : can write in RAM and ROM */
3737 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3738 const uint8_t *buf
, int len
)
3742 target_phys_addr_t page
;
3747 page
= addr
& TARGET_PAGE_MASK
;
3748 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3751 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3754 if (!is_ram_rom_romd(pd
)) {
3757 unsigned long addr1
;
3758 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3760 ptr
= qemu_get_ram_ptr(addr1
);
3761 memcpy(ptr
, buf
, l
);
3762 qemu_put_ram_ptr(ptr
);
3772 target_phys_addr_t addr
;
3773 target_phys_addr_t len
;
3776 static BounceBuffer bounce
;
3778 typedef struct MapClient
{
3780 void (*callback
)(void *opaque
);
3781 QLIST_ENTRY(MapClient
) link
;
3784 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3785 = QLIST_HEAD_INITIALIZER(map_client_list
);
3787 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3789 MapClient
*client
= g_malloc(sizeof(*client
));
3791 client
->opaque
= opaque
;
3792 client
->callback
= callback
;
3793 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3797 void cpu_unregister_map_client(void *_client
)
3799 MapClient
*client
= (MapClient
*)_client
;
3801 QLIST_REMOVE(client
, link
);
3805 static void cpu_notify_map_clients(void)
3809 while (!QLIST_EMPTY(&map_client_list
)) {
3810 client
= QLIST_FIRST(&map_client_list
);
3811 client
->callback(client
->opaque
);
3812 cpu_unregister_map_client(client
);
3816 /* Map a physical memory region into a host virtual address.
3817 * May map a subset of the requested range, given by and returned in *plen.
3818 * May return NULL if resources needed to perform the mapping are exhausted.
3819 * Use only for reads OR writes - not for read-modify-write operations.
3820 * Use cpu_register_map_client() to know when retrying the map operation is
3821 * likely to succeed.
3823 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3824 target_phys_addr_t
*plen
,
3827 target_phys_addr_t len
= *plen
;
3828 target_phys_addr_t todo
= 0;
3830 target_phys_addr_t page
;
3833 ram_addr_t raddr
= RAM_ADDR_MAX
;
3838 page
= addr
& TARGET_PAGE_MASK
;
3839 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3842 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3845 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
3846 if (todo
|| bounce
.buffer
) {
3849 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3853 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
3857 return bounce
.buffer
;
3860 raddr
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
3868 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
3873 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3874 * Will also mark the memory as dirty if is_write == 1. access_len gives
3875 * the amount of memory that was actually read or written by the caller.
3877 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3878 int is_write
, target_phys_addr_t access_len
)
3880 if (buffer
!= bounce
.buffer
) {
3882 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3883 while (access_len
) {
3885 l
= TARGET_PAGE_SIZE
;
3888 if (!cpu_physical_memory_is_dirty(addr1
)) {
3889 /* invalidate code */
3890 tb_invalidate_phys_page_range(addr1
, addr1
+ l
, 0);
3892 cpu_physical_memory_set_dirty_flags(
3893 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3899 if (xen_enabled()) {
3900 xen_invalidate_map_cache_entry(buffer
);
3905 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3907 qemu_vfree(bounce
.buffer
);
3908 bounce
.buffer
= NULL
;
3909 cpu_notify_map_clients();
3912 /* warning: addr must be aligned */
3913 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
3914 enum device_endian endian
)
3922 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3925 if (!is_ram_rom_romd(pd
)) {
3927 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3928 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3929 val
= io_mem_read(io_index
, addr
, 4);
3930 #if defined(TARGET_WORDS_BIGENDIAN)
3931 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3935 if (endian
== DEVICE_BIG_ENDIAN
) {
3941 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
3942 (addr
& ~TARGET_PAGE_MASK
);
3944 case DEVICE_LITTLE_ENDIAN
:
3945 val
= ldl_le_p(ptr
);
3947 case DEVICE_BIG_ENDIAN
:
3948 val
= ldl_be_p(ptr
);
3958 uint32_t ldl_phys(target_phys_addr_t addr
)
3960 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3963 uint32_t ldl_le_phys(target_phys_addr_t addr
)
3965 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3968 uint32_t ldl_be_phys(target_phys_addr_t addr
)
3970 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3973 /* warning: addr must be aligned */
3974 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
3975 enum device_endian endian
)
3983 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3986 if (!is_ram_rom_romd(pd
)) {
3988 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3989 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
3991 /* XXX This is broken when device endian != cpu endian.
3992 Fix and add "endian" variable check */
3993 #ifdef TARGET_WORDS_BIGENDIAN
3994 val
= io_mem_read(io_index
, addr
, 4) << 32;
3995 val
|= io_mem_read(io_index
, addr
+ 4, 4);
3997 val
= io_mem_read(io_index
, addr
, 4);
3998 val
|= io_mem_read(io_index
, addr
+ 4, 4) << 32;
4002 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4003 (addr
& ~TARGET_PAGE_MASK
);
4005 case DEVICE_LITTLE_ENDIAN
:
4006 val
= ldq_le_p(ptr
);
4008 case DEVICE_BIG_ENDIAN
:
4009 val
= ldq_be_p(ptr
);
4019 uint64_t ldq_phys(target_phys_addr_t addr
)
4021 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4024 uint64_t ldq_le_phys(target_phys_addr_t addr
)
4026 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4029 uint64_t ldq_be_phys(target_phys_addr_t addr
)
4031 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4035 uint32_t ldub_phys(target_phys_addr_t addr
)
4038 cpu_physical_memory_read(addr
, &val
, 1);
4042 /* warning: addr must be aligned */
4043 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
4044 enum device_endian endian
)
4052 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4055 if (!is_ram_rom_romd(pd
)) {
4057 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4058 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4059 val
= io_mem_read(io_index
, addr
, 2);
4060 #if defined(TARGET_WORDS_BIGENDIAN)
4061 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4065 if (endian
== DEVICE_BIG_ENDIAN
) {
4071 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4072 (addr
& ~TARGET_PAGE_MASK
);
4074 case DEVICE_LITTLE_ENDIAN
:
4075 val
= lduw_le_p(ptr
);
4077 case DEVICE_BIG_ENDIAN
:
4078 val
= lduw_be_p(ptr
);
4088 uint32_t lduw_phys(target_phys_addr_t addr
)
4090 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4093 uint32_t lduw_le_phys(target_phys_addr_t addr
)
4095 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4098 uint32_t lduw_be_phys(target_phys_addr_t addr
)
4100 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4103 /* warning: addr must be aligned. The ram page is not masked as dirty
4104 and the code inside is not invalidated. It is useful if the dirty
4105 bits are used to track modified PTEs */
4106 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4113 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4116 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4117 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4118 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4119 io_mem_write(io_index
, addr
, val
, 4);
4121 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4122 ptr
= qemu_get_ram_ptr(addr1
);
4125 if (unlikely(in_migration
)) {
4126 if (!cpu_physical_memory_is_dirty(addr1
)) {
4127 /* invalidate code */
4128 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4130 cpu_physical_memory_set_dirty_flags(
4131 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4137 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4144 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4147 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4148 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4149 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4150 #ifdef TARGET_WORDS_BIGENDIAN
4151 io_mem_write(io_index
, addr
, val
>> 32, 4);
4152 io_mem_write(io_index
, addr
+ 4, (uint32_t)val
, 4);
4154 io_mem_write(io_index
, addr
, (uint32_t)val
, 4);
4155 io_mem_write(io_index
, addr
+ 4, val
>> 32, 4);
4158 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4159 (addr
& ~TARGET_PAGE_MASK
);
4164 /* warning: addr must be aligned */
4165 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4166 enum device_endian endian
)
4173 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4176 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4177 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4178 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4179 #if defined(TARGET_WORDS_BIGENDIAN)
4180 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4184 if (endian
== DEVICE_BIG_ENDIAN
) {
4188 io_mem_write(io_index
, addr
, val
, 4);
4190 unsigned long addr1
;
4191 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4193 ptr
= qemu_get_ram_ptr(addr1
);
4195 case DEVICE_LITTLE_ENDIAN
:
4198 case DEVICE_BIG_ENDIAN
:
4205 if (!cpu_physical_memory_is_dirty(addr1
)) {
4206 /* invalidate code */
4207 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4209 cpu_physical_memory_set_dirty_flags(addr1
,
4210 (0xff & ~CODE_DIRTY_FLAG
));
4215 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4217 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4220 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4222 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4225 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4227 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4231 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4234 cpu_physical_memory_write(addr
, &v
, 1);
4237 /* warning: addr must be aligned */
4238 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4239 enum device_endian endian
)
4246 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4249 if ((pd
& ~TARGET_PAGE_MASK
) != io_mem_ram
.ram_addr
) {
4250 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4251 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
.region_offset
;
4252 #if defined(TARGET_WORDS_BIGENDIAN)
4253 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4257 if (endian
== DEVICE_BIG_ENDIAN
) {
4261 io_mem_write(io_index
, addr
, val
, 2);
4263 unsigned long addr1
;
4264 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4266 ptr
= qemu_get_ram_ptr(addr1
);
4268 case DEVICE_LITTLE_ENDIAN
:
4271 case DEVICE_BIG_ENDIAN
:
4278 if (!cpu_physical_memory_is_dirty(addr1
)) {
4279 /* invalidate code */
4280 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4282 cpu_physical_memory_set_dirty_flags(addr1
,
4283 (0xff & ~CODE_DIRTY_FLAG
));
4288 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4290 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4293 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4295 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4298 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4300 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4304 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
4307 cpu_physical_memory_write(addr
, &val
, 8);
4310 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
4312 val
= cpu_to_le64(val
);
4313 cpu_physical_memory_write(addr
, &val
, 8);
4316 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
4318 val
= cpu_to_be64(val
);
4319 cpu_physical_memory_write(addr
, &val
, 8);
4322 /* virtual memory access for debug (includes writing to ROM) */
4323 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
4324 uint8_t *buf
, int len
, int is_write
)
4327 target_phys_addr_t phys_addr
;
4331 page
= addr
& TARGET_PAGE_MASK
;
4332 phys_addr
= cpu_get_phys_page_debug(env
, page
);
4333 /* if no physical page mapped, return an error */
4334 if (phys_addr
== -1)
4336 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4339 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
4341 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
4343 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
4352 /* in deterministic execution mode, instructions doing device I/Os
4353 must be at the end of the TB */
4354 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4356 TranslationBlock
*tb
;
4358 target_ulong pc
, cs_base
;
4361 tb
= tb_find_pc((unsigned long)retaddr
);
4363 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4366 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4367 cpu_restore_state(tb
, env
, (unsigned long)retaddr
);
4368 /* Calculate how many instructions had been executed before the fault
4370 n
= n
- env
->icount_decr
.u16
.low
;
4371 /* Generate a new TB ending on the I/O insn. */
4373 /* On MIPS and SH, delay slot instructions can only be restarted if
4374 they were already the first instruction in the TB. If this is not
4375 the first instruction in a TB then re-execute the preceding
4377 #if defined(TARGET_MIPS)
4378 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4379 env
->active_tc
.PC
-= 4;
4380 env
->icount_decr
.u16
.low
++;
4381 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4383 #elif defined(TARGET_SH4)
4384 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4387 env
->icount_decr
.u16
.low
++;
4388 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4391 /* This should never happen. */
4392 if (n
> CF_COUNT_MASK
)
4393 cpu_abort(env
, "TB too big during recompile");
4395 cflags
= n
| CF_LAST_IO
;
4397 cs_base
= tb
->cs_base
;
4399 tb_phys_invalidate(tb
, -1);
4400 /* FIXME: In theory this could raise an exception. In practice
4401 we have already translated the block once so it's probably ok. */
4402 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4403 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4404 the first in the TB) then we end up generating a whole new TB and
4405 repeating the fault, which is horribly inefficient.
4406 Better would be to execute just this insn uncached, or generate a
4408 cpu_resume_from_signal(env
, NULL
);
4411 #if !defined(CONFIG_USER_ONLY)
4413 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4415 int i
, target_code_size
, max_target_code_size
;
4416 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4417 TranslationBlock
*tb
;
4419 target_code_size
= 0;
4420 max_target_code_size
= 0;
4422 direct_jmp_count
= 0;
4423 direct_jmp2_count
= 0;
4424 for(i
= 0; i
< nb_tbs
; i
++) {
4426 target_code_size
+= tb
->size
;
4427 if (tb
->size
> max_target_code_size
)
4428 max_target_code_size
= tb
->size
;
4429 if (tb
->page_addr
[1] != -1)
4431 if (tb
->tb_next_offset
[0] != 0xffff) {
4433 if (tb
->tb_next_offset
[1] != 0xffff) {
4434 direct_jmp2_count
++;
4438 /* XXX: avoid using doubles ? */
4439 cpu_fprintf(f
, "Translation buffer state:\n");
4440 cpu_fprintf(f
, "gen code size %td/%ld\n",
4441 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4442 cpu_fprintf(f
, "TB count %d/%d\n",
4443 nb_tbs
, code_gen_max_blocks
);
4444 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4445 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4446 max_target_code_size
);
4447 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4448 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4449 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4450 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4452 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4453 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4455 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4457 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4458 cpu_fprintf(f
, "\nStatistics:\n");
4459 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4460 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4461 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4462 tcg_dump_info(f
, cpu_fprintf
);
4465 /* NOTE: this function can trigger an exception */
4466 /* NOTE2: the returned address is not exactly the physical address: it
4467 is the offset relative to phys_ram_base */
4468 tb_page_addr_t
get_page_addr_code(CPUState
*env1
, target_ulong addr
)
4470 int mmu_idx
, page_index
, pd
;
4473 page_index
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
4474 mmu_idx
= cpu_mmu_index(env1
);
4475 if (unlikely(env1
->tlb_table
[mmu_idx
][page_index
].addr_code
!=
4476 (addr
& TARGET_PAGE_MASK
))) {
4479 pd
= env1
->tlb_table
[mmu_idx
][page_index
].addr_code
& ~TARGET_PAGE_MASK
;
4480 if (pd
!= io_mem_ram
.ram_addr
&& pd
!= io_mem_rom
.ram_addr
4481 && !(pd
& IO_MEM_ROMD
)) {
4482 #if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4483 cpu_unassigned_access(env1
, addr
, 0, 1, 0, 4);
4485 cpu_abort(env1
, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx
"\n", addr
);
4488 p
= (void *)((uintptr_t)addr
+ env1
->tlb_table
[mmu_idx
][page_index
].addend
);
4489 return qemu_ram_addr_from_host_nofail(p
);
4492 #define MMUSUFFIX _cmmu
4494 #define GETPC() NULL
4495 #define env cpu_single_env
4496 #define SOFTMMU_CODE_ACCESS
4499 #include "softmmu_template.h"
4502 #include "softmmu_template.h"
4505 #include "softmmu_template.h"
4508 #include "softmmu_template.h"