2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
60 #define WANT_EXEC_OBSOLETE
61 #include "exec-obsolete.h"
63 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
70 //#define DEBUG_TLB_CHECK
72 //#define DEBUG_IOPORT
73 //#define DEBUG_SUBPAGE
75 #if !defined(CONFIG_USER_ONLY)
76 /* TB consistency checks only implemented for usermode emulation. */
80 #define SMC_BITMAP_USE_THRESHOLD 10
82 static TranslationBlock
*tbs
;
83 static int code_gen_max_blocks
;
84 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
86 /* any access to the tbs or the page table must use this lock */
87 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
89 #if defined(__arm__) || defined(__sparc_v9__)
90 /* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93 #define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
97 /* Maximum alignment for Win32 is 16. */
98 #define code_gen_section \
99 __attribute__((aligned (16)))
101 #define code_gen_section \
102 __attribute__((aligned (32)))
105 uint8_t code_gen_prologue
[1024] code_gen_section
;
106 static uint8_t *code_gen_buffer
;
107 static unsigned long code_gen_buffer_size
;
108 /* threshold to flush the translated code buffer */
109 static unsigned long code_gen_buffer_max_size
;
110 static uint8_t *code_gen_ptr
;
112 #if !defined(CONFIG_USER_ONLY)
114 static int in_migration
;
116 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
118 static MemoryRegion
*system_memory
;
119 static MemoryRegion
*system_io
;
124 /* current CPU in the current thread. It is only valid inside
126 DEFINE_TLS(CPUState
*,cpu_single_env
);
127 /* 0 = Do not count executed instructions.
128 1 = Precise instruction counting.
129 2 = Adaptive rate instruction counting. */
132 typedef struct PageDesc
{
133 /* list of TBs intersecting this ram page */
134 TranslationBlock
*first_tb
;
135 /* in order to optimize self modifying code, we count the number
136 of lookups we do to a given page to use a bitmap */
137 unsigned int code_write_count
;
138 uint8_t *code_bitmap
;
139 #if defined(CONFIG_USER_ONLY)
144 /* In system mode we want L1_MAP to be based on ram offsets,
145 while in user mode we want it to be based on virtual addresses. */
146 #if !defined(CONFIG_USER_ONLY)
147 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
150 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
153 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
156 /* Size of the L2 (and L3, etc) page tables. */
158 #define L2_SIZE (1 << L2_BITS)
160 /* The bits remaining after N lower levels of page tables. */
161 #define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163 #define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
166 /* Size of the L1 page table. Avoid silly small sizes. */
167 #if P_L1_BITS_REM < 4
168 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
170 #define P_L1_BITS P_L1_BITS_REM
173 #if V_L1_BITS_REM < 4
174 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
176 #define V_L1_BITS V_L1_BITS_REM
179 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
182 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
185 unsigned long qemu_real_host_page_size
;
186 unsigned long qemu_host_page_size
;
187 unsigned long qemu_host_page_mask
;
189 /* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191 static void *l1_map
[V_L1_SIZE
];
193 #if !defined(CONFIG_USER_ONLY)
194 typedef struct PhysPageDesc
{
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset
;
197 ram_addr_t region_offset
;
200 /* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202 static void *l1_phys_map
[P_L1_SIZE
];
204 static void io_mem_init(void);
205 static void memory_map_init(void);
207 /* io memory support */
208 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
209 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
210 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
211 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
212 static int io_mem_watch
;
217 static const char *logfilename
= "qemu.log";
219 static const char *logfilename
= "/tmp/qemu.log";
223 static int log_append
= 0;
226 #if !defined(CONFIG_USER_ONLY)
227 static int tlb_flush_count
;
229 static int tb_flush_count
;
230 static int tb_phys_invalidate_count
;
233 static void map_exec(void *addr
, long size
)
236 VirtualProtect(addr
, size
,
237 PAGE_EXECUTE_READWRITE
, &old_protect
);
241 static void map_exec(void *addr
, long size
)
243 unsigned long start
, end
, page_size
;
245 page_size
= getpagesize();
246 start
= (unsigned long)addr
;
247 start
&= ~(page_size
- 1);
249 end
= (unsigned long)addr
+ size
;
250 end
+= page_size
- 1;
251 end
&= ~(page_size
- 1);
253 mprotect((void *)start
, end
- start
,
254 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
258 static void page_init(void)
260 /* NOTE: we can always suppose that qemu_host_page_size >=
264 SYSTEM_INFO system_info
;
266 GetSystemInfo(&system_info
);
267 qemu_real_host_page_size
= system_info
.dwPageSize
;
270 qemu_real_host_page_size
= getpagesize();
272 if (qemu_host_page_size
== 0)
273 qemu_host_page_size
= qemu_real_host_page_size
;
274 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
275 qemu_host_page_size
= TARGET_PAGE_SIZE
;
276 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
278 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
280 #ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry
*freep
;
284 freep
= kinfo_getvmmap(getpid(), &cnt
);
287 for (i
= 0; i
< cnt
; i
++) {
288 unsigned long startaddr
, endaddr
;
290 startaddr
= freep
[i
].kve_start
;
291 endaddr
= freep
[i
].kve_end
;
292 if (h2g_valid(startaddr
)) {
293 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
295 if (h2g_valid(endaddr
)) {
296 endaddr
= h2g(endaddr
);
297 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
299 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
312 last_brk
= (unsigned long)sbrk(0);
314 f
= fopen("/compat/linux/proc/self/maps", "r");
319 unsigned long startaddr
, endaddr
;
322 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
324 if (n
== 2 && h2g_valid(startaddr
)) {
325 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
327 if (h2g_valid(endaddr
)) {
328 endaddr
= h2g(endaddr
);
332 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
344 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
350 #if defined(CONFIG_USER_ONLY)
351 /* We can't use g_malloc because it may recurse into a locked mutex. */
352 # define ALLOC(P, SIZE) \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
358 # define ALLOC(P, SIZE) \
359 do { P = g_malloc0(SIZE); } while (0)
362 /* Level 1. Always allocated. */
363 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
366 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
373 ALLOC(p
, sizeof(void *) * L2_SIZE
);
377 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
385 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
391 return pd
+ (index
& (L2_SIZE
- 1));
394 static inline PageDesc
*page_find(tb_page_addr_t index
)
396 return page_find_alloc(index
, 0);
399 #if !defined(CONFIG_USER_ONLY)
400 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
406 /* Level 1. Always allocated. */
407 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
410 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
416 *lp
= p
= g_malloc0(sizeof(void *) * L2_SIZE
);
418 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
424 int first_index
= index
& ~(L2_SIZE
- 1);
430 *lp
= pd
= g_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
432 for (i
= 0; i
< L2_SIZE
; i
++) {
433 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
434 pd
[i
].region_offset
= (first_index
+ i
) << TARGET_PAGE_BITS
;
438 return pd
+ (index
& (L2_SIZE
- 1));
441 static inline PhysPageDesc
phys_page_find(target_phys_addr_t index
)
443 PhysPageDesc
*p
= phys_page_find_alloc(index
, 0);
448 return (PhysPageDesc
) {
449 .phys_offset
= IO_MEM_UNASSIGNED
,
450 .region_offset
= index
<< TARGET_PAGE_BITS
,
455 static void tlb_protect_code(ram_addr_t ram_addr
);
456 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
458 #define mmap_lock() do { } while(0)
459 #define mmap_unlock() do { } while(0)
462 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
464 #if defined(CONFIG_USER_ONLY)
465 /* Currently it is not recommended to allocate big chunks of data in
466 user mode. It will change when a dedicated libc will be used */
467 #define USE_STATIC_CODE_GEN_BUFFER
470 #ifdef USE_STATIC_CODE_GEN_BUFFER
471 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
472 __attribute__((aligned (CODE_GEN_ALIGN
)));
475 static void code_gen_alloc(unsigned long tb_size
)
477 #ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer
= static_code_gen_buffer
;
479 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
480 map_exec(code_gen_buffer
, code_gen_buffer_size
);
482 code_gen_buffer_size
= tb_size
;
483 if (code_gen_buffer_size
== 0) {
484 #if defined(CONFIG_USER_ONLY)
485 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
487 /* XXX: needs adjustments */
488 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
491 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
492 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
493 /* The code gen buffer location may have constraints depending on
494 the host cpu and OS */
495 #if defined(__linux__)
500 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
501 #if defined(__x86_64__)
503 /* Cannot map more than that */
504 if (code_gen_buffer_size
> (800 * 1024 * 1024))
505 code_gen_buffer_size
= (800 * 1024 * 1024);
506 #elif defined(__sparc_v9__)
507 // Map the buffer below 2G, so we can use direct calls and branches
509 start
= (void *) 0x60000000UL
;
510 if (code_gen_buffer_size
> (512 * 1024 * 1024))
511 code_gen_buffer_size
= (512 * 1024 * 1024);
512 #elif defined(__arm__)
513 /* Keep the buffer no bigger than 16GB to branch between blocks */
514 if (code_gen_buffer_size
> 16 * 1024 * 1024)
515 code_gen_buffer_size
= 16 * 1024 * 1024;
516 #elif defined(__s390x__)
517 /* Map the buffer so that we can use direct calls and branches. */
518 /* We have a +- 4GB range on the branches; leave some slop. */
519 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
520 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
522 start
= (void *)0x90000000UL
;
524 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
525 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
527 if (code_gen_buffer
== MAP_FAILED
) {
528 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
532 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
533 || defined(__DragonFly__) || defined(__OpenBSD__) \
534 || defined(__NetBSD__)
538 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
539 #if defined(__x86_64__)
540 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
541 * 0x40000000 is free */
543 addr
= (void *)0x40000000;
544 /* Cannot map more than that */
545 if (code_gen_buffer_size
> (800 * 1024 * 1024))
546 code_gen_buffer_size
= (800 * 1024 * 1024);
547 #elif defined(__sparc_v9__)
548 // Map the buffer below 2G, so we can use direct calls and branches
550 addr
= (void *) 0x60000000UL
;
551 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
552 code_gen_buffer_size
= (512 * 1024 * 1024);
555 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
556 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
558 if (code_gen_buffer
== MAP_FAILED
) {
559 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
564 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
565 map_exec(code_gen_buffer
, code_gen_buffer_size
);
567 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
568 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
569 code_gen_buffer_max_size
= code_gen_buffer_size
-
570 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
571 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
572 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
575 /* Must be called before using the QEMU cpus. 'tb_size' is the size
576 (in bytes) allocated to the translation buffer. Zero means default
578 void tcg_exec_init(unsigned long tb_size
)
581 code_gen_alloc(tb_size
);
582 code_gen_ptr
= code_gen_buffer
;
584 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
585 /* There's no guest base to take into account, so go ahead and
586 initialize the prologue now. */
587 tcg_prologue_init(&tcg_ctx
);
591 bool tcg_enabled(void)
593 return code_gen_buffer
!= NULL
;
596 void cpu_exec_init_all(void)
598 #if !defined(CONFIG_USER_ONLY)
604 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
606 static int cpu_common_post_load(void *opaque
, int version_id
)
608 CPUState
*env
= opaque
;
610 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
611 version_id is increased. */
612 env
->interrupt_request
&= ~0x01;
618 static const VMStateDescription vmstate_cpu_common
= {
619 .name
= "cpu_common",
621 .minimum_version_id
= 1,
622 .minimum_version_id_old
= 1,
623 .post_load
= cpu_common_post_load
,
624 .fields
= (VMStateField
[]) {
625 VMSTATE_UINT32(halted
, CPUState
),
626 VMSTATE_UINT32(interrupt_request
, CPUState
),
627 VMSTATE_END_OF_LIST()
632 CPUState
*qemu_get_cpu(int cpu
)
634 CPUState
*env
= first_cpu
;
637 if (env
->cpu_index
== cpu
)
645 void cpu_exec_init(CPUState
*env
)
650 #if defined(CONFIG_USER_ONLY)
653 env
->next_cpu
= NULL
;
656 while (*penv
!= NULL
) {
657 penv
= &(*penv
)->next_cpu
;
660 env
->cpu_index
= cpu_index
;
662 QTAILQ_INIT(&env
->breakpoints
);
663 QTAILQ_INIT(&env
->watchpoints
);
664 #ifndef CONFIG_USER_ONLY
665 env
->thread_id
= qemu_get_thread_id();
668 #if defined(CONFIG_USER_ONLY)
671 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
672 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
673 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
674 cpu_save
, cpu_load
, env
);
678 /* Allocate a new translation block. Flush the translation buffer if
679 too many translation blocks or too much generated code. */
680 static TranslationBlock
*tb_alloc(target_ulong pc
)
682 TranslationBlock
*tb
;
684 if (nb_tbs
>= code_gen_max_blocks
||
685 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
693 void tb_free(TranslationBlock
*tb
)
695 /* In practice this is mostly used for single use temporary TB
696 Ignore the hard cases and just back up if this TB happens to
697 be the last one generated. */
698 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
699 code_gen_ptr
= tb
->tc_ptr
;
704 static inline void invalidate_page_bitmap(PageDesc
*p
)
706 if (p
->code_bitmap
) {
707 g_free(p
->code_bitmap
);
708 p
->code_bitmap
= NULL
;
710 p
->code_write_count
= 0;
713 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
715 static void page_flush_tb_1 (int level
, void **lp
)
724 for (i
= 0; i
< L2_SIZE
; ++i
) {
725 pd
[i
].first_tb
= NULL
;
726 invalidate_page_bitmap(pd
+ i
);
730 for (i
= 0; i
< L2_SIZE
; ++i
) {
731 page_flush_tb_1 (level
- 1, pp
+ i
);
736 static void page_flush_tb(void)
739 for (i
= 0; i
< V_L1_SIZE
; i
++) {
740 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
744 /* flush all the translation blocks */
745 /* XXX: tb_flush is currently not thread safe */
746 void tb_flush(CPUState
*env1
)
749 #if defined(DEBUG_FLUSH)
750 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
751 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
753 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
755 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
756 cpu_abort(env1
, "Internal error: code buffer overflow\n");
760 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
761 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
764 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
767 code_gen_ptr
= code_gen_buffer
;
768 /* XXX: flush processor icache at this point if cache flush is
773 #ifdef DEBUG_TB_CHECK
775 static void tb_invalidate_check(target_ulong address
)
777 TranslationBlock
*tb
;
779 address
&= TARGET_PAGE_MASK
;
780 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
781 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
782 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
783 address
>= tb
->pc
+ tb
->size
)) {
784 printf("ERROR invalidate: address=" TARGET_FMT_lx
785 " PC=%08lx size=%04x\n",
786 address
, (long)tb
->pc
, tb
->size
);
792 /* verify that all the pages have correct rights for code */
793 static void tb_page_check(void)
795 TranslationBlock
*tb
;
796 int i
, flags1
, flags2
;
798 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
799 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
800 flags1
= page_get_flags(tb
->pc
);
801 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
802 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
803 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
804 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
812 /* invalidate one TB */
813 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
816 TranslationBlock
*tb1
;
820 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
823 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
827 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
829 TranslationBlock
*tb1
;
835 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
837 *ptb
= tb1
->page_next
[n1
];
840 ptb
= &tb1
->page_next
[n1
];
844 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
846 TranslationBlock
*tb1
, **ptb
;
849 ptb
= &tb
->jmp_next
[n
];
852 /* find tb(n) in circular list */
856 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
857 if (n1
== n
&& tb1
== tb
)
860 ptb
= &tb1
->jmp_first
;
862 ptb
= &tb1
->jmp_next
[n1
];
865 /* now we can suppress tb(n) from the list */
866 *ptb
= tb
->jmp_next
[n
];
868 tb
->jmp_next
[n
] = NULL
;
872 /* reset the jump entry 'n' of a TB so that it is not chained to
874 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
876 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
879 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
884 tb_page_addr_t phys_pc
;
885 TranslationBlock
*tb1
, *tb2
;
887 /* remove the TB from the hash list */
888 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
889 h
= tb_phys_hash_func(phys_pc
);
890 tb_remove(&tb_phys_hash
[h
], tb
,
891 offsetof(TranslationBlock
, phys_hash_next
));
893 /* remove the TB from the page list */
894 if (tb
->page_addr
[0] != page_addr
) {
895 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
896 tb_page_remove(&p
->first_tb
, tb
);
897 invalidate_page_bitmap(p
);
899 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
900 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
901 tb_page_remove(&p
->first_tb
, tb
);
902 invalidate_page_bitmap(p
);
905 tb_invalidated_flag
= 1;
907 /* remove the TB from the hash list */
908 h
= tb_jmp_cache_hash_func(tb
->pc
);
909 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
910 if (env
->tb_jmp_cache
[h
] == tb
)
911 env
->tb_jmp_cache
[h
] = NULL
;
914 /* suppress this TB from the two jump lists */
915 tb_jmp_remove(tb
, 0);
916 tb_jmp_remove(tb
, 1);
918 /* suppress any remaining jumps to this TB */
924 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
925 tb2
= tb1
->jmp_next
[n1
];
926 tb_reset_jump(tb1
, n1
);
927 tb1
->jmp_next
[n1
] = NULL
;
930 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
932 tb_phys_invalidate_count
++;
935 static inline void set_bits(uint8_t *tab
, int start
, int len
)
941 mask
= 0xff << (start
& 7);
942 if ((start
& ~7) == (end
& ~7)) {
944 mask
&= ~(0xff << (end
& 7));
949 start
= (start
+ 8) & ~7;
951 while (start
< end1
) {
956 mask
= ~(0xff << (end
& 7));
962 static void build_page_bitmap(PageDesc
*p
)
964 int n
, tb_start
, tb_end
;
965 TranslationBlock
*tb
;
967 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
972 tb
= (TranslationBlock
*)((long)tb
& ~3);
973 /* NOTE: this is subtle as a TB may span two physical pages */
975 /* NOTE: tb_end may be after the end of the page, but
976 it is not a problem */
977 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
978 tb_end
= tb_start
+ tb
->size
;
979 if (tb_end
> TARGET_PAGE_SIZE
)
980 tb_end
= TARGET_PAGE_SIZE
;
983 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
985 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
986 tb
= tb
->page_next
[n
];
990 TranslationBlock
*tb_gen_code(CPUState
*env
,
991 target_ulong pc
, target_ulong cs_base
,
992 int flags
, int cflags
)
994 TranslationBlock
*tb
;
996 tb_page_addr_t phys_pc
, phys_page2
;
997 target_ulong virt_page2
;
1000 phys_pc
= get_page_addr_code(env
, pc
);
1003 /* flush must be done */
1005 /* cannot fail at this point */
1007 /* Don't forget to invalidate previous TB info. */
1008 tb_invalidated_flag
= 1;
1010 tc_ptr
= code_gen_ptr
;
1011 tb
->tc_ptr
= tc_ptr
;
1012 tb
->cs_base
= cs_base
;
1014 tb
->cflags
= cflags
;
1015 cpu_gen_code(env
, tb
, &code_gen_size
);
1016 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1018 /* check next page if needed */
1019 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1021 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1022 phys_page2
= get_page_addr_code(env
, virt_page2
);
1024 tb_link_page(tb
, phys_pc
, phys_page2
);
1028 /* invalidate all TBs which intersect with the target physical page
1029 starting in range [start;end[. NOTE: start and end must refer to
1030 the same physical page. 'is_cpu_write_access' should be true if called
1031 from a real cpu write access: the virtual CPU will exit the current
1032 TB if code is modified inside this TB. */
1033 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1034 int is_cpu_write_access
)
1036 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1037 CPUState
*env
= cpu_single_env
;
1038 tb_page_addr_t tb_start
, tb_end
;
1041 #ifdef TARGET_HAS_PRECISE_SMC
1042 int current_tb_not_found
= is_cpu_write_access
;
1043 TranslationBlock
*current_tb
= NULL
;
1044 int current_tb_modified
= 0;
1045 target_ulong current_pc
= 0;
1046 target_ulong current_cs_base
= 0;
1047 int current_flags
= 0;
1048 #endif /* TARGET_HAS_PRECISE_SMC */
1050 p
= page_find(start
>> TARGET_PAGE_BITS
);
1053 if (!p
->code_bitmap
&&
1054 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1055 is_cpu_write_access
) {
1056 /* build code bitmap */
1057 build_page_bitmap(p
);
1060 /* we remove all the TBs in the range [start, end[ */
1061 /* XXX: see if in some cases it could be faster to invalidate all the code */
1063 while (tb
!= NULL
) {
1065 tb
= (TranslationBlock
*)((long)tb
& ~3);
1066 tb_next
= tb
->page_next
[n
];
1067 /* NOTE: this is subtle as a TB may span two physical pages */
1069 /* NOTE: tb_end may be after the end of the page, but
1070 it is not a problem */
1071 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1072 tb_end
= tb_start
+ tb
->size
;
1074 tb_start
= tb
->page_addr
[1];
1075 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1077 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1078 #ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_not_found
) {
1080 current_tb_not_found
= 0;
1082 if (env
->mem_io_pc
) {
1083 /* now we have a real cpu fault */
1084 current_tb
= tb_find_pc(env
->mem_io_pc
);
1087 if (current_tb
== tb
&&
1088 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1089 /* If we are modifying the current TB, we must stop
1090 its execution. We could be more precise by checking
1091 that the modification is after the current PC, but it
1092 would require a specialized function to partially
1093 restore the CPU state */
1095 current_tb_modified
= 1;
1096 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1097 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1100 #endif /* TARGET_HAS_PRECISE_SMC */
1101 /* we need to do that to handle the case where a signal
1102 occurs while doing tb_phys_invalidate() */
1105 saved_tb
= env
->current_tb
;
1106 env
->current_tb
= NULL
;
1108 tb_phys_invalidate(tb
, -1);
1110 env
->current_tb
= saved_tb
;
1111 if (env
->interrupt_request
&& env
->current_tb
)
1112 cpu_interrupt(env
, env
->interrupt_request
);
1117 #if !defined(CONFIG_USER_ONLY)
1118 /* if no code remaining, no need to continue to use slow writes */
1120 invalidate_page_bitmap(p
);
1121 if (is_cpu_write_access
) {
1122 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1126 #ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified
) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify
1131 env
->current_tb
= NULL
;
1132 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1133 cpu_resume_from_signal(env
, NULL
);
1138 /* len must be <= 8 and start must be a multiple of len */
1139 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env
->mem_io_vaddr
, len
,
1147 cpu_single_env
->eip
,
1148 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1151 p
= page_find(start
>> TARGET_PAGE_BITS
);
1154 if (p
->code_bitmap
) {
1155 offset
= start
& ~TARGET_PAGE_MASK
;
1156 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1157 if (b
& ((1 << len
) - 1))
1161 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1165 #if !defined(CONFIG_SOFTMMU)
1166 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1167 unsigned long pc
, void *puc
)
1169 TranslationBlock
*tb
;
1172 #ifdef TARGET_HAS_PRECISE_SMC
1173 TranslationBlock
*current_tb
= NULL
;
1174 CPUState
*env
= cpu_single_env
;
1175 int current_tb_modified
= 0;
1176 target_ulong current_pc
= 0;
1177 target_ulong current_cs_base
= 0;
1178 int current_flags
= 0;
1181 addr
&= TARGET_PAGE_MASK
;
1182 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1186 #ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb
&& pc
!= 0) {
1188 current_tb
= tb_find_pc(pc
);
1191 while (tb
!= NULL
) {
1193 tb
= (TranslationBlock
*)((long)tb
& ~3);
1194 #ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb
== tb
&&
1196 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
1203 current_tb_modified
= 1;
1204 cpu_restore_state(current_tb
, env
, pc
);
1205 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1208 #endif /* TARGET_HAS_PRECISE_SMC */
1209 tb_phys_invalidate(tb
, addr
);
1210 tb
= tb
->page_next
[n
];
1213 #ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified
) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify
1218 env
->current_tb
= NULL
;
1219 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1220 cpu_resume_from_signal(env
, puc
);
1226 /* add the tb in the target page and protect it if necessary */
1227 static inline void tb_alloc_page(TranslationBlock
*tb
,
1228 unsigned int n
, tb_page_addr_t page_addr
)
1231 #ifndef CONFIG_USER_ONLY
1232 bool page_already_protected
;
1235 tb
->page_addr
[n
] = page_addr
;
1236 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1237 tb
->page_next
[n
] = p
->first_tb
;
1238 #ifndef CONFIG_USER_ONLY
1239 page_already_protected
= p
->first_tb
!= NULL
;
1241 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1242 invalidate_page_bitmap(p
);
1244 #if defined(TARGET_HAS_SMC) || 1
1246 #if defined(CONFIG_USER_ONLY)
1247 if (p
->flags
& PAGE_WRITE
) {
1252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
1254 page_addr
&= qemu_host_page_mask
;
1256 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1257 addr
+= TARGET_PAGE_SIZE
) {
1259 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1263 p2
->flags
&= ~PAGE_WRITE
;
1265 mprotect(g2h(page_addr
), qemu_host_page_size
,
1266 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1267 #ifdef DEBUG_TB_INVALIDATE
1268 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
1276 if (!page_already_protected
) {
1277 tlb_protect_code(page_addr
);
1281 #endif /* TARGET_HAS_SMC */
1284 /* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
1286 void tb_link_page(TranslationBlock
*tb
,
1287 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1290 TranslationBlock
**ptb
;
1292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1295 /* add in the physical hash table */
1296 h
= tb_phys_hash_func(phys_pc
);
1297 ptb
= &tb_phys_hash
[h
];
1298 tb
->phys_hash_next
= *ptb
;
1301 /* add in the page list */
1302 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1303 if (phys_page2
!= -1)
1304 tb_alloc_page(tb
, 1, phys_page2
);
1306 tb
->page_addr
[1] = -1;
1308 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1309 tb
->jmp_next
[0] = NULL
;
1310 tb
->jmp_next
[1] = NULL
;
1312 /* init original jump addresses */
1313 if (tb
->tb_next_offset
[0] != 0xffff)
1314 tb_reset_jump(tb
, 0);
1315 if (tb
->tb_next_offset
[1] != 0xffff)
1316 tb_reset_jump(tb
, 1);
1318 #ifdef DEBUG_TB_CHECK
1324 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1328 int m_min
, m_max
, m
;
1330 TranslationBlock
*tb
;
1334 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1335 tc_ptr
>= (unsigned long)code_gen_ptr
)
1337 /* binary search (cf Knuth) */
1340 while (m_min
<= m_max
) {
1341 m
= (m_min
+ m_max
) >> 1;
1343 v
= (unsigned long)tb
->tc_ptr
;
1346 else if (tc_ptr
< v
) {
1355 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1357 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1359 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1362 tb1
= tb
->jmp_next
[n
];
1364 /* find head of list */
1367 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1370 tb1
= tb1
->jmp_next
[n1
];
1372 /* we are now sure now that tb jumps to tb1 */
1375 /* remove tb from the jmp_first list */
1376 ptb
= &tb_next
->jmp_first
;
1380 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1381 if (n1
== n
&& tb1
== tb
)
1383 ptb
= &tb1
->jmp_next
[n1
];
1385 *ptb
= tb
->jmp_next
[n
];
1386 tb
->jmp_next
[n
] = NULL
;
1388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb
, n
);
1391 /* suppress jumps in the tb on which we could have jumped */
1392 tb_reset_jump_recursive(tb_next
);
1396 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1398 tb_reset_jump_recursive2(tb
, 0);
1399 tb_reset_jump_recursive2(tb
, 1);
1402 #if defined(TARGET_HAS_ICE)
1403 #if defined(CONFIG_USER_ONLY)
1404 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1406 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1409 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1411 target_phys_addr_t addr
;
1413 ram_addr_t ram_addr
;
1416 addr
= cpu_get_phys_page_debug(env
, pc
);
1417 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1419 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1420 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1423 #endif /* TARGET_HAS_ICE */
1425 #if defined(CONFIG_USER_ONLY)
1426 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1431 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1432 int flags
, CPUWatchpoint
**watchpoint
)
1437 /* Add a watchpoint. */
1438 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1439 int flags
, CPUWatchpoint
**watchpoint
)
1441 target_ulong len_mask
= ~(len
- 1);
1444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1446 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1450 wp
= g_malloc(sizeof(*wp
));
1453 wp
->len_mask
= len_mask
;
1456 /* keep all GDB-injected watchpoints in front */
1458 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1460 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1462 tlb_flush_page(env
, addr
);
1469 /* Remove a specific watchpoint. */
1470 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1473 target_ulong len_mask
= ~(len
- 1);
1476 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1477 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1478 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1479 cpu_watchpoint_remove_by_ref(env
, wp
);
1486 /* Remove a specific watchpoint by reference. */
1487 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1489 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1491 tlb_flush_page(env
, watchpoint
->vaddr
);
1496 /* Remove all matching watchpoints. */
1497 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1499 CPUWatchpoint
*wp
, *next
;
1501 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1502 if (wp
->flags
& mask
)
1503 cpu_watchpoint_remove_by_ref(env
, wp
);
1508 /* Add a breakpoint. */
1509 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1510 CPUBreakpoint
**breakpoint
)
1512 #if defined(TARGET_HAS_ICE)
1515 bp
= g_malloc(sizeof(*bp
));
1520 /* keep all GDB-injected breakpoints in front */
1522 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1524 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1526 breakpoint_invalidate(env
, pc
);
1536 /* Remove a specific breakpoint. */
1537 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1539 #if defined(TARGET_HAS_ICE)
1542 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1543 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1544 cpu_breakpoint_remove_by_ref(env
, bp
);
1554 /* Remove a specific breakpoint by reference. */
1555 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1557 #if defined(TARGET_HAS_ICE)
1558 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1560 breakpoint_invalidate(env
, breakpoint
->pc
);
1566 /* Remove all matching breakpoints. */
1567 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1569 #if defined(TARGET_HAS_ICE)
1570 CPUBreakpoint
*bp
, *next
;
1572 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1573 if (bp
->flags
& mask
)
1574 cpu_breakpoint_remove_by_ref(env
, bp
);
1579 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581 void cpu_single_step(CPUState
*env
, int enabled
)
1583 #if defined(TARGET_HAS_ICE)
1584 if (env
->singlestep_enabled
!= enabled
) {
1585 env
->singlestep_enabled
= enabled
;
1587 kvm_update_guest_debug(env
, 0);
1589 /* must flush all the translated code to avoid inconsistencies */
1590 /* XXX: only flush what is necessary */
1597 /* enable or disable low levels log */
1598 void cpu_set_log(int log_flags
)
1600 loglevel
= log_flags
;
1601 if (loglevel
&& !logfile
) {
1602 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1604 perror(logfilename
);
1607 #if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610 static char logfile_buf
[4096];
1611 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1613 #elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile
, NULL
, _IONBF
, 0);
1617 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1621 if (!loglevel
&& logfile
) {
1627 void cpu_set_log_filename(const char *filename
)
1629 logfilename
= strdup(filename
);
1634 cpu_set_log(loglevel
);
1637 static void cpu_unlink_tb(CPUState
*env
)
1639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
1643 TranslationBlock
*tb
;
1644 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1646 spin_lock(&interrupt_lock
);
1647 tb
= env
->current_tb
;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TB */
1651 env
->current_tb
= NULL
;
1652 tb_reset_jump_recursive(tb
);
1654 spin_unlock(&interrupt_lock
);
1657 #ifndef CONFIG_USER_ONLY
1658 /* mask must never be zero, except for A20 change call */
1659 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1663 old_mask
= env
->interrupt_request
;
1664 env
->interrupt_request
|= mask
;
1667 * If called from iothread context, wake the target cpu in
1670 if (!qemu_cpu_is_self(env
)) {
1676 env
->icount_decr
.u16
.high
= 0xffff;
1678 && (mask
& ~old_mask
) != 0) {
1679 cpu_abort(env
, "Raised interrupt while not in I/O function");
1686 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1688 #else /* CONFIG_USER_ONLY */
1690 void cpu_interrupt(CPUState
*env
, int mask
)
1692 env
->interrupt_request
|= mask
;
1695 #endif /* CONFIG_USER_ONLY */
1697 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1699 env
->interrupt_request
&= ~mask
;
1702 void cpu_exit(CPUState
*env
)
1704 env
->exit_request
= 1;
1708 const CPULogItem cpu_log_items
[] = {
1709 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM
, "in_asm",
1712 "show target assembly code for each compiled TB" },
1713 { CPU_LOG_TB_OP
, "op",
1714 "show micro ops for each compiled TB" },
1715 { CPU_LOG_TB_OP_OPT
, "op_opt",
1718 "before eflags optimization and "
1720 "after liveness analysis" },
1721 { CPU_LOG_INT
, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC
, "exec",
1724 "show trace before each executed TB (lots of logs)" },
1725 { CPU_LOG_TB_CPU
, "cpu",
1726 "show CPU state before block translation" },
1728 { CPU_LOG_PCALL
, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
1730 { CPU_LOG_RESET
, "cpu_reset",
1731 "show CPU state before CPU resets" },
1734 { CPU_LOG_IOPORT
, "ioport",
1735 "show all i/o ports accesses" },
1740 static int cmp1(const char *s1
, int n
, const char *s2
)
1742 if (strlen(s2
) != n
)
1744 return memcmp(s1
, s2
, n
) == 0;
1747 /* takes a comma separated list of log masks. Return 0 if error. */
1748 int cpu_str_to_log_mask(const char *str
)
1750 const CPULogItem
*item
;
1757 p1
= strchr(p
, ',');
1760 if(cmp1(p
,p1
-p
,"all")) {
1761 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1765 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1766 if (cmp1(p
, p1
- p
, item
->name
))
1780 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1787 fprintf(stderr
, "qemu: fatal: ");
1788 vfprintf(stderr
, fmt
, ap
);
1789 fprintf(stderr
, "\n");
1791 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1793 cpu_dump_state(env
, stderr
, fprintf
, 0);
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt
, ap2
);
1800 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1802 log_cpu_state(env
, 0);
1809 #if defined(CONFIG_USER_ONLY)
1811 struct sigaction act
;
1812 sigfillset(&act
.sa_mask
);
1813 act
.sa_handler
= SIG_DFL
;
1814 sigaction(SIGABRT
, &act
, NULL
);
1820 CPUState
*cpu_copy(CPUState
*env
)
1822 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1823 CPUState
*next_cpu
= new_env
->next_cpu
;
1824 int cpu_index
= new_env
->cpu_index
;
1825 #if defined(TARGET_HAS_ICE)
1830 memcpy(new_env
, env
, sizeof(CPUState
));
1832 /* Preserve chaining and index. */
1833 new_env
->next_cpu
= next_cpu
;
1834 new_env
->cpu_index
= cpu_index
;
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
1839 QTAILQ_INIT(&env
->breakpoints
);
1840 QTAILQ_INIT(&env
->watchpoints
);
1841 #if defined(TARGET_HAS_ICE)
1842 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1843 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1845 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1846 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1854 #if !defined(CONFIG_USER_ONLY)
1856 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1863 memset (&env
->tb_jmp_cache
[i
], 0,
1864 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1866 i
= tb_jmp_cache_hash_page(addr
);
1867 memset (&env
->tb_jmp_cache
[i
], 0,
1868 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1871 static CPUTLBEntry s_cputlb_empty_entry
= {
1878 /* NOTE: if flush_global is true, also flush global entries (not
1880 void tlb_flush(CPUState
*env
, int flush_global
)
1884 #if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env
->current_tb
= NULL
;
1891 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1893 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1894 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1898 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1900 env
->tlb_flush_addr
= -1;
1901 env
->tlb_flush_mask
= 0;
1905 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1907 if (addr
== (tlb_entry
->addr_read
&
1908 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1909 addr
== (tlb_entry
->addr_write
&
1910 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1911 addr
== (tlb_entry
->addr_code
&
1912 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1913 *tlb_entry
= s_cputlb_empty_entry
;
1917 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1922 #if defined(DEBUG_TLB)
1923 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
1927 #if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
1930 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env
->current_tb
= NULL
;
1939 addr
&= TARGET_PAGE_MASK
;
1940 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1941 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1942 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1944 tlb_flush_jmp_cache(env
, addr
);
1947 /* update the TLBs so that writes to code in the virtual page 'addr'
1949 static void tlb_protect_code(ram_addr_t ram_addr
)
1951 cpu_physical_memory_reset_dirty(ram_addr
,
1952 ram_addr
+ TARGET_PAGE_SIZE
,
1956 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1957 tested for self modifying code */
1958 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1961 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
1964 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1965 unsigned long start
, unsigned long length
)
1968 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1969 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1970 if ((addr
- start
) < length
) {
1971 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1976 /* Note: start and end must be within the same ram block. */
1977 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1981 unsigned long length
, start1
;
1984 start
&= TARGET_PAGE_MASK
;
1985 end
= TARGET_PAGE_ALIGN(end
);
1987 length
= end
- start
;
1990 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
1994 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
1995 /* Check that we don't span multiple blocks - this breaks the
1996 address comparisons below. */
1997 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
1998 != (end
- 1) - start
) {
2002 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2004 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2005 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2006 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2012 int cpu_physical_memory_set_dirty_tracking(int enable
)
2015 in_migration
= enable
;
2019 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2021 ram_addr_t ram_addr
;
2024 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2025 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2026 + tlb_entry
->addend
);
2027 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2028 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2029 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2034 /* update the TLB according to the current state of the dirty bits */
2035 void cpu_tlb_update_dirty(CPUState
*env
)
2039 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2040 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2041 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2045 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2047 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2048 tlb_entry
->addr_write
= vaddr
;
2051 /* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2058 vaddr
&= TARGET_PAGE_MASK
;
2059 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2060 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2061 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2064 /* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2069 target_ulong mask
= ~(size
- 1);
2071 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2072 env
->tlb_flush_addr
= vaddr
& mask
;
2073 env
->tlb_flush_mask
= mask
;
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask
&= env
->tlb_flush_mask
;
2080 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2083 env
->tlb_flush_addr
&= mask
;
2084 env
->tlb_flush_mask
= mask
;
2087 /* Add a new TLB entry. At most one entry for a given virtual address
2088 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2089 supplied size is only used by tlb_flush_page. */
2090 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2091 target_phys_addr_t paddr
, int prot
,
2092 int mmu_idx
, target_ulong size
)
2097 target_ulong address
;
2098 target_ulong code_address
;
2099 unsigned long addend
;
2102 target_phys_addr_t iotlb
;
2104 assert(size
>= TARGET_PAGE_SIZE
);
2105 if (size
!= TARGET_PAGE_SIZE
) {
2106 tlb_add_large_page(env
, vaddr
, size
);
2108 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2110 #if defined(DEBUG_TLB)
2111 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2112 " prot=%x idx=%d pd=0x%08lx\n",
2113 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2117 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2118 /* IO memory case (romd handled later) */
2119 address
|= TLB_MMIO
;
2121 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2122 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2124 iotlb
= pd
& TARGET_PAGE_MASK
;
2125 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2126 iotlb
|= IO_MEM_NOTDIRTY
;
2128 iotlb
|= IO_MEM_ROM
;
2130 /* IO handlers are currently passed a physical address.
2131 It would be nice to pass an offset from the base address
2132 of that region. This would avoid having to special case RAM,
2133 and avoid full address decoding in every device.
2134 We can't use the high bits of pd for this because
2135 IO_MEM_ROMD uses these as a ram address. */
2136 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2137 iotlb
+= p
.region_offset
;
2140 code_address
= address
;
2141 /* Make accesses to pages with watchpoints go via the
2142 watchpoint trap routines. */
2143 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2144 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2145 /* Avoid trapping reads of pages with a write breakpoint. */
2146 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2147 iotlb
= io_mem_watch
+ paddr
;
2148 address
|= TLB_MMIO
;
2154 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2155 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2156 te
= &env
->tlb_table
[mmu_idx
][index
];
2157 te
->addend
= addend
- vaddr
;
2158 if (prot
& PAGE_READ
) {
2159 te
->addr_read
= address
;
2164 if (prot
& PAGE_EXEC
) {
2165 te
->addr_code
= code_address
;
2169 if (prot
& PAGE_WRITE
) {
2170 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2171 (pd
& IO_MEM_ROMD
)) {
2172 /* Write access calls the I/O callback. */
2173 te
->addr_write
= address
| TLB_MMIO
;
2174 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2175 !cpu_physical_memory_is_dirty(pd
)) {
2176 te
->addr_write
= address
| TLB_NOTDIRTY
;
2178 te
->addr_write
= address
;
2181 te
->addr_write
= -1;
2187 void tlb_flush(CPUState
*env
, int flush_global
)
2191 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2196 * Walks guest process memory "regions" one by one
2197 * and calls callback function 'fn' for each region.
2200 struct walk_memory_regions_data
2202 walk_memory_regions_fn fn
;
2204 unsigned long start
;
2208 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2209 abi_ulong end
, int new_prot
)
2211 if (data
->start
!= -1ul) {
2212 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2218 data
->start
= (new_prot
? end
: -1ul);
2219 data
->prot
= new_prot
;
2224 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2225 abi_ulong base
, int level
, void **lp
)
2231 return walk_memory_regions_end(data
, base
, 0);
2236 for (i
= 0; i
< L2_SIZE
; ++i
) {
2237 int prot
= pd
[i
].flags
;
2239 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2240 if (prot
!= data
->prot
) {
2241 rc
= walk_memory_regions_end(data
, pa
, prot
);
2249 for (i
= 0; i
< L2_SIZE
; ++i
) {
2250 pa
= base
| ((abi_ulong
)i
<<
2251 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2252 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2262 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2264 struct walk_memory_regions_data data
;
2272 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2273 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2274 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2280 return walk_memory_regions_end(&data
, 0, 0);
2283 static int dump_region(void *priv
, abi_ulong start
,
2284 abi_ulong end
, unsigned long prot
)
2286 FILE *f
= (FILE *)priv
;
2288 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2289 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2290 start
, end
, end
- start
,
2291 ((prot
& PAGE_READ
) ? 'r' : '-'),
2292 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2293 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2298 /* dump memory mappings */
2299 void page_dump(FILE *f
)
2301 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2302 "start", "end", "size", "prot");
2303 walk_memory_regions(f
, dump_region
);
2306 int page_get_flags(target_ulong address
)
2310 p
= page_find(address
>> TARGET_PAGE_BITS
);
2316 /* Modify the flags of a page and invalidate the code if necessary.
2317 The flag PAGE_WRITE_ORG is positioned automatically depending
2318 on PAGE_WRITE. The mmap_lock should already be held. */
2319 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2321 target_ulong addr
, len
;
2323 /* This function should never be called with addresses outside the
2324 guest address space. If this assert fires, it probably indicates
2325 a missing call to h2g_valid. */
2326 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2327 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2329 assert(start
< end
);
2331 start
= start
& TARGET_PAGE_MASK
;
2332 end
= TARGET_PAGE_ALIGN(end
);
2334 if (flags
& PAGE_WRITE
) {
2335 flags
|= PAGE_WRITE_ORG
;
2338 for (addr
= start
, len
= end
- start
;
2340 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2341 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2343 /* If the write protection bit is set, then we invalidate
2345 if (!(p
->flags
& PAGE_WRITE
) &&
2346 (flags
& PAGE_WRITE
) &&
2348 tb_invalidate_phys_page(addr
, 0, NULL
);
2354 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2360 /* This function should never be called with addresses outside the
2361 guest address space. If this assert fires, it probably indicates
2362 a missing call to h2g_valid. */
2363 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2364 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2370 if (start
+ len
- 1 < start
) {
2371 /* We've wrapped around. */
2375 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2376 start
= start
& TARGET_PAGE_MASK
;
2378 for (addr
= start
, len
= end
- start
;
2380 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2381 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2384 if( !(p
->flags
& PAGE_VALID
) )
2387 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2389 if (flags
& PAGE_WRITE
) {
2390 if (!(p
->flags
& PAGE_WRITE_ORG
))
2392 /* unprotect the page if it was put read-only because it
2393 contains translated code */
2394 if (!(p
->flags
& PAGE_WRITE
)) {
2395 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */

    p = page_find(address >> TARGET_PAGE_BITS);

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);
        return 1;
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        return NULL;
    }

    memory = (memory+hpagesize-1) & ~(hpagesize-1);
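
    /*
     * Editor's note (not part of the original file): the statement above
     * rounds the requested size up to a whole number of huge pages.  With
     * 2 MiB huge pages (hpagesize = 0x200000), a request of 0x6400000
     * (100 MiB) is already aligned and stays unchanged, while 0x6400001
     * rounds up to 0x6600000 (102 MiB).
     */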
    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }

    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
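
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a device model that owns a block of guest-visible RAM (e.g. video memory)
 * would typically pair the allocation and lookup helpers like this; the
 * names are hypothetical.
 *
 *     ram_addr_t vram_offset = qemu_ram_alloc(vram_size, mr);
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);
 *
 * Per the comment above, such a pointer is only for memory the device owns;
 * general-purpose DMA should use cpu_physical_memory_map() or
 * cpu_physical_memory_rw() instead.
 */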
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    RAMBlock *block;

    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (addr - block->offset + *size > block->length)
                *size = block->length - addr + block->offset;
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldub_p(ptr);
}

static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stb_p(ptr, value);
}

static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return lduw_p(ptr);
}

static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stw_p(ptr, value);
}

static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    return ldl_p(ptr);
}

static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t value)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    stl_p(ptr, value);
}

static CPUReadMemoryFunc * const subpage_ram_read[] = {
    &subpage_ram_readb,
    &subpage_ram_readw,
    &subpage_ram_readl,
};

static CPUWriteMemoryFunc * const subpage_ram_write[] = {
    &subpage_ram_writeb,
    &subpage_ram_writew,
    &subpage_ram_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        memory = IO_MEM_SUBPAGE_RAM;
    }
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    return (io_index << IO_MEM_SHIFT);
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
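
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * registering MMIO callbacks with the table-based dispatcher above.  The
 * callback names and the opaque pointer are hypothetical; a NULL entry
 * falls back to the unassigned-memory handlers.
 *
 *     static CPUReadMemoryFunc * const my_read[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *
 *     int io_index = cpu_register_io_memory(my_read, my_write, my_state);
 *     // ... map it with cpu_register_physical_memory_log(), use it ...
 *     cpu_unregister_io_memory(io_index);
 */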
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
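
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the usual pattern for a DMA-style read of guest memory with the map/unmap
 * pair above.  If the range is not ordinary RAM, map() falls back to the
 * single bounce buffer, so callers must cope with a shorter-than-requested
 * mapping or a NULL return.  `gpa` and `size` are hypothetical.
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 0);   // 0 = read
 *     if (host) {
 *         // ... consume plen bytes at host ...
 *         cpu_physical_memory_unmap(host, plen, 0, plen);
 *     } else {
 *         // resources exhausted; cpu_register_map_client() reports when a
 *         // retry is likely to succeed
 *     }
 */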
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
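
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * device code picks the accessor matching the device's byte order rather
 * than the guest CPU's.  REG_ADDR is a hypothetical register address.
 *
 *     stl_le_phys(REG_ADDR, 0x12345678);   // stored little-endian regardless
 *     uint32_t v = ldl_le_phys(REG_ADDR);  // reads back 0x12345678
 *
 * The plain stl_phys()/ldl_phys() variants use the target's native order.
 */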
#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"