/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif
uint8_t code_gen_prologue[1024] code_gen_section;
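/* The prologue is the small host-code trampoline that TCG emits to set up
   host registers and enter (and leave) translated code; 1024 bytes is just
   a comfortable upper bound for it. */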
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;
#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
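/* Worked example of the split above (assuming the usual L2_BITS == 10 and
   12-bit target pages): a 36-bit physical address space leaves 36 - 12 = 24
   page-number bits, so P_L1_BITS_REM == 4 and P_L1_SIZE is a 16-entry L1
   table sitting on top of two full 1024-entry L2 levels. */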
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);
/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif /* !CONFIG_USER_ONLY */
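/* io_mem_read/io_mem_write are indexed first by I/O slot and then by access
   size, so a registered region supplies one handler per supported width. */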
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
static int log_append = 0;

#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
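/* page_find() below is the lookup-only wrapper: it walks the same radix
   table but never allocates intermediate levels (alloc == 0), so it simply
   returns NULL for pages that have never been touched. */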
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
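/* In the softmmu build above, mmap_lock()/mmap_unlock() are deliberately
   no-ops: there is no guest mmap() to race against, and the user-mode build
   is expected to provide the real locking implementation instead. */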
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
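/* Note: the TB descriptor array is sized from CODE_GEN_AVG_BLOCK_SIZE, an
   assumed average per-block code size, so running out of descriptors or
   hitting the buffer high-water mark later forces a full tb_flush(). */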
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
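/* code_gen_buffer is only set once code_gen_alloc() has run, so this also
   serves as an "is the translator initialized" check (e.g. it stays false
   on configurations that only ever use a hardware accelerator). */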
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
761 #ifdef DEBUG_TB_CHECK
763 static void tb_invalidate_check(target_ulong address
)
765 TranslationBlock
*tb
;
767 address
&= TARGET_PAGE_MASK
;
768 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
769 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
770 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
771 address
>= tb
->pc
+ tb
->size
)) {
772 printf("ERROR invalidate: address=" TARGET_FMT_lx
773 " PC=%08lx size=%04x\n",
774 address
, (long)tb
->pc
, tb
->size
);
780 /* verify that all the pages have correct rights for code */
781 static void tb_page_check(void)
783 TranslationBlock
*tb
;
784 int i
, flags1
, flags2
;
786 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
787 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
788 flags1
= page_get_flags(tb
->pc
);
789 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
790 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
791 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
792 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
800 /* invalidate one TB */
801 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
804 TranslationBlock
*tb1
;
808 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
811 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
815 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
817 TranslationBlock
*tb1
;
823 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
825 *ptb
= tb1
->page_next
[n1
];
828 ptb
= &tb1
->page_next
[n1
];
832 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
834 TranslationBlock
*tb1
, **ptb
;
837 ptb
= &tb
->jmp_next
[n
];
840 /* find tb(n) in circular list */
844 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
845 if (n1
== n
&& tb1
== tb
)
848 ptb
= &tb1
->jmp_first
;
850 ptb
= &tb1
->jmp_next
[n1
];
853 /* now we can suppress tb(n) from the list */
854 *ptb
= tb
->jmp_next
[n
];
856 tb
->jmp_next
[n
] = NULL
;
860 /* reset the jump entry 'n' of a TB so that it is not chained to
862 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
864 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
867 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
872 tb_page_addr_t phys_pc
;
873 TranslationBlock
*tb1
, *tb2
;
875 /* remove the TB from the hash list */
876 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
877 h
= tb_phys_hash_func(phys_pc
);
878 tb_remove(&tb_phys_hash
[h
], tb
,
879 offsetof(TranslationBlock
, phys_hash_next
));
881 /* remove the TB from the page list */
882 if (tb
->page_addr
[0] != page_addr
) {
883 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
884 tb_page_remove(&p
->first_tb
, tb
);
885 invalidate_page_bitmap(p
);
887 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
888 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
889 tb_page_remove(&p
->first_tb
, tb
);
890 invalidate_page_bitmap(p
);
893 tb_invalidated_flag
= 1;
895 /* remove the TB from the hash list */
896 h
= tb_jmp_cache_hash_func(tb
->pc
);
897 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
898 if (env
->tb_jmp_cache
[h
] == tb
)
899 env
->tb_jmp_cache
[h
] = NULL
;
902 /* suppress this TB from the two jump lists */
903 tb_jmp_remove(tb
, 0);
904 tb_jmp_remove(tb
, 1);
906 /* suppress any remaining jumps to this TB */
912 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
913 tb2
= tb1
->jmp_next
[n1
];
914 tb_reset_jump(tb1
, n1
);
915 tb1
->jmp_next
[n1
] = NULL
;
918 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
920 tb_phys_invalidate_count
++;
923 static inline void set_bits(uint8_t *tab
, int start
, int len
)
929 mask
= 0xff << (start
& 7);
930 if ((start
& ~7) == (end
& ~7)) {
932 mask
&= ~(0xff << (end
& 7));
937 start
= (start
+ 8) & ~7;
939 while (start
< end1
) {
944 mask
= ~(0xff << (end
& 7));
950 static void build_page_bitmap(PageDesc
*p
)
952 int n
, tb_start
, tb_end
;
953 TranslationBlock
*tb
;
955 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
960 tb
= (TranslationBlock
*)((long)tb
& ~3);
961 /* NOTE: this is subtle as a TB may span two physical pages */
963 /* NOTE: tb_end may be after the end of the page, but
964 it is not a problem */
965 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
966 tb_end
= tb_start
+ tb
->size
;
967 if (tb_end
> TARGET_PAGE_SIZE
)
968 tb_end
= TARGET_PAGE_SIZE
;
971 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
973 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
974 tb
= tb
->page_next
[n
];
978 TranslationBlock
*tb_gen_code(CPUState
*env
,
979 target_ulong pc
, target_ulong cs_base
,
980 int flags
, int cflags
)
982 TranslationBlock
*tb
;
984 tb_page_addr_t phys_pc
, phys_page2
;
985 target_ulong virt_page2
;
988 phys_pc
= get_page_addr_code(env
, pc
);
991 /* flush must be done */
993 /* cannot fail at this point */
995 /* Don't forget to invalidate previous TB info. */
996 tb_invalidated_flag
= 1;
998 tc_ptr
= code_gen_ptr
;
1000 tb
->cs_base
= cs_base
;
1002 tb
->cflags
= cflags
;
1003 cpu_gen_code(env
, tb
, &code_gen_size
);
1004 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1006 /* check next page if needed */
1007 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1009 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1010 phys_page2
= get_page_addr_code(env
, virt_page2
);
1012 tb_link_page(tb
, phys_pc
, phys_page2
);
1016 /* invalidate all TBs which intersect with the target physical page
1017 starting in range [start;end[. NOTE: start and end must refer to
1018 the same physical page. 'is_cpu_write_access' should be true if called
1019 from a real cpu write access: the virtual CPU will exit the current
1020 TB if code is modified inside this TB. */
1021 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1022 int is_cpu_write_access
)
1024 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1025 CPUState
*env
= cpu_single_env
;
1026 tb_page_addr_t tb_start
, tb_end
;
1029 #ifdef TARGET_HAS_PRECISE_SMC
1030 int current_tb_not_found
= is_cpu_write_access
;
1031 TranslationBlock
*current_tb
= NULL
;
1032 int current_tb_modified
= 0;
1033 target_ulong current_pc
= 0;
1034 target_ulong current_cs_base
= 0;
1035 int current_flags
= 0;
1036 #endif /* TARGET_HAS_PRECISE_SMC */
1038 p
= page_find(start
>> TARGET_PAGE_BITS
);
1041 if (!p
->code_bitmap
&&
1042 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1043 is_cpu_write_access
) {
1044 /* build code bitmap */
1045 build_page_bitmap(p
);
1048 /* we remove all the TBs in the range [start, end[ */
1049 /* XXX: see if in some cases it could be faster to invalidate all the code */
1051 while (tb
!= NULL
) {
1053 tb
= (TranslationBlock
*)((long)tb
& ~3);
1054 tb_next
= tb
->page_next
[n
];
1055 /* NOTE: this is subtle as a TB may span two physical pages */
1057 /* NOTE: tb_end may be after the end of the page, but
1058 it is not a problem */
1059 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1060 tb_end
= tb_start
+ tb
->size
;
1062 tb_start
= tb
->page_addr
[1];
1063 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1065 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1066 #ifdef TARGET_HAS_PRECISE_SMC
1067 if (current_tb_not_found
) {
1068 current_tb_not_found
= 0;
1070 if (env
->mem_io_pc
) {
1071 /* now we have a real cpu fault */
1072 current_tb
= tb_find_pc(env
->mem_io_pc
);
1075 if (current_tb
== tb
&&
1076 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1077 /* If we are modifying the current TB, we must stop
1078 its execution. We could be more precise by checking
1079 that the modification is after the current PC, but it
1080 would require a specialized function to partially
1081 restore the CPU state */
1083 current_tb_modified
= 1;
1084 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1085 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1088 #endif /* TARGET_HAS_PRECISE_SMC */
1089 /* we need to do that to handle the case where a signal
1090 occurs while doing tb_phys_invalidate() */
1093 saved_tb
= env
->current_tb
;
1094 env
->current_tb
= NULL
;
1096 tb_phys_invalidate(tb
, -1);
1098 env
->current_tb
= saved_tb
;
1099 if (env
->interrupt_request
&& env
->current_tb
)
1100 cpu_interrupt(env
, env
->interrupt_request
);
1105 #if !defined(CONFIG_USER_ONLY)
1106 /* if no code remaining, no need to continue to use slow writes */
1108 invalidate_page_bitmap(p
);
1109 if (is_cpu_write_access
) {
1110 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1114 #ifdef TARGET_HAS_PRECISE_SMC
1115 if (current_tb_modified
) {
1116 /* we generate a block containing just the instruction
1117 modifying the memory. It will ensure that it cannot modify
1119 env
->current_tb
= NULL
;
1120 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1121 cpu_resume_from_signal(env
, NULL
);
1126 /* len must be <= 8 and start must be a multiple of len */
1127 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1133 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1134 cpu_single_env
->mem_io_vaddr
, len
,
1135 cpu_single_env
->eip
,
1136 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1139 p
= page_find(start
>> TARGET_PAGE_BITS
);
1142 if (p
->code_bitmap
) {
1143 offset
= start
& ~TARGET_PAGE_MASK
;
1144 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1145 if (b
& ((1 << len
) - 1))
1149 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1153 #if !defined(CONFIG_SOFTMMU)
1154 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1155 unsigned long pc
, void *puc
)
1157 TranslationBlock
*tb
;
1160 #ifdef TARGET_HAS_PRECISE_SMC
1161 TranslationBlock
*current_tb
= NULL
;
1162 CPUState
*env
= cpu_single_env
;
1163 int current_tb_modified
= 0;
1164 target_ulong current_pc
= 0;
1165 target_ulong current_cs_base
= 0;
1166 int current_flags
= 0;
1169 addr
&= TARGET_PAGE_MASK
;
1170 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1174 #ifdef TARGET_HAS_PRECISE_SMC
1175 if (tb
&& pc
!= 0) {
1176 current_tb
= tb_find_pc(pc
);
1179 while (tb
!= NULL
) {
1181 tb
= (TranslationBlock
*)((long)tb
& ~3);
1182 #ifdef TARGET_HAS_PRECISE_SMC
1183 if (current_tb
== tb
&&
1184 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1185 /* If we are modifying the current TB, we must stop
1186 its execution. We could be more precise by checking
1187 that the modification is after the current PC, but it
1188 would require a specialized function to partially
1189 restore the CPU state */
1191 current_tb_modified
= 1;
1192 cpu_restore_state(current_tb
, env
, pc
);
1193 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1196 #endif /* TARGET_HAS_PRECISE_SMC */
1197 tb_phys_invalidate(tb
, addr
);
1198 tb
= tb
->page_next
[n
];
1201 #ifdef TARGET_HAS_PRECISE_SMC
1202 if (current_tb_modified
) {
1203 /* we generate a block containing just the instruction
1204 modifying the memory. It will ensure that it cannot modify
1206 env
->current_tb
= NULL
;
1207 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1208 cpu_resume_from_signal(env
, puc
);
1214 /* add the tb in the target page and protect it if necessary */
1215 static inline void tb_alloc_page(TranslationBlock
*tb
,
1216 unsigned int n
, tb_page_addr_t page_addr
)
1219 #ifndef CONFIG_USER_ONLY
1220 bool page_already_protected
;
1223 tb
->page_addr
[n
] = page_addr
;
1224 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1225 tb
->page_next
[n
] = p
->first_tb
;
1226 #ifndef CONFIG_USER_ONLY
1227 page_already_protected
= p
->first_tb
!= NULL
;
1229 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1230 invalidate_page_bitmap(p
);
1232 #if defined(TARGET_HAS_SMC) || 1
1234 #if defined(CONFIG_USER_ONLY)
1235 if (p
->flags
& PAGE_WRITE
) {
1240 /* force the host page as non writable (writes will have a
1241 page fault + mprotect overhead) */
1242 page_addr
&= qemu_host_page_mask
;
1244 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1245 addr
+= TARGET_PAGE_SIZE
) {
1247 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1251 p2
->flags
&= ~PAGE_WRITE
;
1253 mprotect(g2h(page_addr
), qemu_host_page_size
,
1254 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1255 #ifdef DEBUG_TB_INVALIDATE
1256 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1261 /* if some code is already present, then the pages are already
1262 protected. So we handle the case where only the first TB is
1263 allocated in a physical page */
1264 if (!page_already_protected
) {
1265 tlb_protect_code(page_addr
);
1269 #endif /* TARGET_HAS_SMC */
1272 /* add a new TB and link it to the physical page tables. phys_page2 is
1273 (-1) to indicate that only one page contains the TB. */
1274 void tb_link_page(TranslationBlock
*tb
,
1275 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1278 TranslationBlock
**ptb
;
1280 /* Grab the mmap lock to stop another thread invalidating this TB
1281 before we are done. */
1283 /* add in the physical hash table */
1284 h
= tb_phys_hash_func(phys_pc
);
1285 ptb
= &tb_phys_hash
[h
];
1286 tb
->phys_hash_next
= *ptb
;
1289 /* add in the page list */
1290 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1291 if (phys_page2
!= -1)
1292 tb_alloc_page(tb
, 1, phys_page2
);
1294 tb
->page_addr
[1] = -1;
1296 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1297 tb
->jmp_next
[0] = NULL
;
1298 tb
->jmp_next
[1] = NULL
;
1300 /* init original jump addresses */
1301 if (tb
->tb_next_offset
[0] != 0xffff)
1302 tb_reset_jump(tb
, 0);
1303 if (tb
->tb_next_offset
[1] != 0xffff)
1304 tb_reset_jump(tb
, 1);
1306 #ifdef DEBUG_TB_CHECK
1312 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1313 tb[1].tc_ptr. Return NULL if not found */
1314 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1316 int m_min
, m_max
, m
;
1318 TranslationBlock
*tb
;
1322 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1323 tc_ptr
>= (unsigned long)code_gen_ptr
)
1325 /* binary search (cf Knuth) */
1328 while (m_min
<= m_max
) {
1329 m
= (m_min
+ m_max
) >> 1;
1331 v
= (unsigned long)tb
->tc_ptr
;
1334 else if (tc_ptr
< v
) {
1343 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1345 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1347 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1350 tb1
= tb
->jmp_next
[n
];
1352 /* find head of list */
1355 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1358 tb1
= tb1
->jmp_next
[n1
];
1360 /* we are now sure now that tb jumps to tb1 */
1363 /* remove tb from the jmp_first list */
1364 ptb
= &tb_next
->jmp_first
;
1368 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1369 if (n1
== n
&& tb1
== tb
)
1371 ptb
= &tb1
->jmp_next
[n1
];
1373 *ptb
= tb
->jmp_next
[n
];
1374 tb
->jmp_next
[n
] = NULL
;
1376 /* suppress the jump to next tb in generated code */
1377 tb_reset_jump(tb
, n
);
1379 /* suppress jumps in the tb on which we could have jumped */
1380 tb_reset_jump_recursive(tb_next
);
1384 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1386 tb_reset_jump_recursive2(tb
, 0);
1387 tb_reset_jump_recursive2(tb
, 1);
1390 #if defined(TARGET_HAS_ICE)
1391 #if defined(CONFIG_USER_ONLY)
1392 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1394 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1397 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1399 target_phys_addr_t addr
;
1401 ram_addr_t ram_addr
;
1404 addr
= cpu_get_phys_page_debug(env
, pc
);
1405 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1407 pd
= IO_MEM_UNASSIGNED
;
1409 pd
= p
->phys_offset
;
1411 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1412 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1415 #endif /* TARGET_HAS_ICE */
1417 #if defined(CONFIG_USER_ONLY)
1418 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1423 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1424 int flags
, CPUWatchpoint
**watchpoint
)
1429 /* Add a watchpoint. */
1430 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1431 int flags
, CPUWatchpoint
**watchpoint
)
1433 target_ulong len_mask
= ~(len
- 1);
1436 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1437 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1438 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1439 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1442 wp
= g_malloc(sizeof(*wp
));
1445 wp
->len_mask
= len_mask
;
1448 /* keep all GDB-injected watchpoints in front */
1450 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1452 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1454 tlb_flush_page(env
, addr
);
1461 /* Remove a specific watchpoint. */
1462 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1465 target_ulong len_mask
= ~(len
- 1);
1468 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1469 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1470 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1471 cpu_watchpoint_remove_by_ref(env
, wp
);
1478 /* Remove a specific watchpoint by reference. */
1479 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1481 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1483 tlb_flush_page(env
, watchpoint
->vaddr
);
1488 /* Remove all matching watchpoints. */
1489 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1491 CPUWatchpoint
*wp
, *next
;
1493 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1494 if (wp
->flags
& mask
)
1495 cpu_watchpoint_remove_by_ref(env
, wp
);
1500 /* Add a breakpoint. */
1501 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1502 CPUBreakpoint
**breakpoint
)
1504 #if defined(TARGET_HAS_ICE)
1507 bp
= g_malloc(sizeof(*bp
));
1512 /* keep all GDB-injected breakpoints in front */
1514 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1516 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1518 breakpoint_invalidate(env
, pc
);
1528 /* Remove a specific breakpoint. */
1529 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1531 #if defined(TARGET_HAS_ICE)
1534 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1535 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1536 cpu_breakpoint_remove_by_ref(env
, bp
);
1546 /* Remove a specific breakpoint by reference. */
1547 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1549 #if defined(TARGET_HAS_ICE)
1550 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1552 breakpoint_invalidate(env
, breakpoint
->pc
);
1558 /* Remove all matching breakpoints. */
1559 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1561 #if defined(TARGET_HAS_ICE)
1562 CPUBreakpoint
*bp
, *next
;
1564 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1565 if (bp
->flags
& mask
)
1566 cpu_breakpoint_remove_by_ref(env
, bp
);
1571 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1572 CPU loop after each instruction */
1573 void cpu_single_step(CPUState
*env
, int enabled
)
1575 #if defined(TARGET_HAS_ICE)
1576 if (env
->singlestep_enabled
!= enabled
) {
1577 env
->singlestep_enabled
= enabled
;
1579 kvm_update_guest_debug(env
, 0);
1581 /* must flush all the translated code to avoid inconsistencies */
1582 /* XXX: only flush what is necessary */
1589 /* enable or disable low levels log */
1590 void cpu_set_log(int log_flags
)
1592 loglevel
= log_flags
;
1593 if (loglevel
&& !logfile
) {
1594 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1596 perror(logfilename
);
1599 #if !defined(CONFIG_SOFTMMU)
1600 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1602 static char logfile_buf
[4096];
1603 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1605 #elif defined(_WIN32)
1606 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1607 setvbuf(logfile
, NULL
, _IONBF
, 0);
1609 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1613 if (!loglevel
&& logfile
) {
1619 void cpu_set_log_filename(const char *filename
)
1621 logfilename
= strdup(filename
);
1626 cpu_set_log(loglevel
);
1629 static void cpu_unlink_tb(CPUState
*env
)
1631 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1632 problem and hope the cpu will stop of its own accord. For userspace
1633 emulation this often isn't actually as bad as it sounds. Often
1634 signals are used primarily to interrupt blocking syscalls. */
1635 TranslationBlock
*tb
;
1636 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1638 spin_lock(&interrupt_lock
);
1639 tb
= env
->current_tb
;
1640 /* if the cpu is currently executing code, we must unlink it and
1641 all the potentially executing TB */
1643 env
->current_tb
= NULL
;
1644 tb_reset_jump_recursive(tb
);
1646 spin_unlock(&interrupt_lock
);
1649 #ifndef CONFIG_USER_ONLY
1650 /* mask must never be zero, except for A20 change call */
1651 static void tcg_handle_interrupt(CPUState
*env
, int mask
)
1655 old_mask
= env
->interrupt_request
;
1656 env
->interrupt_request
|= mask
;
1659 * If called from iothread context, wake the target cpu in
1662 if (!qemu_cpu_is_self(env
)) {
1668 env
->icount_decr
.u16
.high
= 0xffff;
1670 && (mask
& ~old_mask
) != 0) {
1671 cpu_abort(env
, "Raised interrupt while not in I/O function");
1678 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1680 #else /* CONFIG_USER_ONLY */
1682 void cpu_interrupt(CPUState
*env
, int mask
)
1684 env
->interrupt_request
|= mask
;
1687 #endif /* CONFIG_USER_ONLY */
1689 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1691 env
->interrupt_request
&= ~mask
;
1694 void cpu_exit(CPUState
*env
)
1696 env
->exit_request
= 1;
1700 const CPULogItem cpu_log_items
[] = {
1701 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1702 "show generated host assembly code for each compiled TB" },
1703 { CPU_LOG_TB_IN_ASM
, "in_asm",
1704 "show target assembly code for each compiled TB" },
1705 { CPU_LOG_TB_OP
, "op",
1706 "show micro ops for each compiled TB" },
1707 { CPU_LOG_TB_OP_OPT
, "op_opt",
1710 "before eflags optimization and "
1712 "after liveness analysis" },
1713 { CPU_LOG_INT
, "int",
1714 "show interrupts/exceptions in short format" },
1715 { CPU_LOG_EXEC
, "exec",
1716 "show trace before each executed TB (lots of logs)" },
1717 { CPU_LOG_TB_CPU
, "cpu",
1718 "show CPU state before block translation" },
1720 { CPU_LOG_PCALL
, "pcall",
1721 "show protected mode far calls/returns/exceptions" },
1722 { CPU_LOG_RESET
, "cpu_reset",
1723 "show CPU state before CPU resets" },
1726 { CPU_LOG_IOPORT
, "ioport",
1727 "show all i/o ports accesses" },
1732 #ifndef CONFIG_USER_ONLY
1733 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1734 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1736 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1738 ram_addr_t phys_offset
,
1741 CPUPhysMemoryClient
*client
;
1742 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1743 client
->set_memory(client
, start_addr
, size
, phys_offset
, log_dirty
);
1747 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1748 target_phys_addr_t end
)
1750 CPUPhysMemoryClient
*client
;
1751 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1752 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1759 static int cpu_notify_migration_log(int enable
)
1761 CPUPhysMemoryClient
*client
;
1762 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1763 int r
= client
->migration_log(client
, enable
);
1771 target_phys_addr_t start_addr
;
1773 ram_addr_t phys_offset
;
1776 /* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1777 * address. Each intermediate table provides the next L2_BITs of guest
1778 * physical address space. The number of levels vary based on host and
1779 * guest configuration, making it efficient to build the final guest
1780 * physical address by seeding the L1 offset and shifting and adding in
1781 * each L2 offset as we recurse through them. */
1782 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
, int level
,
1783 void **lp
, target_phys_addr_t addr
,
1784 struct last_map
*map
)
1792 PhysPageDesc
*pd
= *lp
;
1793 addr
<<= L2_BITS
+ TARGET_PAGE_BITS
;
1794 for (i
= 0; i
< L2_SIZE
; ++i
) {
1795 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1796 target_phys_addr_t start_addr
= addr
| i
<< TARGET_PAGE_BITS
;
1799 start_addr
== map
->start_addr
+ map
->size
&&
1800 pd
[i
].phys_offset
== map
->phys_offset
+ map
->size
) {
1802 map
->size
+= TARGET_PAGE_SIZE
;
1804 } else if (map
->size
) {
1805 client
->set_memory(client
, map
->start_addr
,
1806 map
->size
, map
->phys_offset
, false);
1809 map
->start_addr
= start_addr
;
1810 map
->size
= TARGET_PAGE_SIZE
;
1811 map
->phys_offset
= pd
[i
].phys_offset
;
1816 for (i
= 0; i
< L2_SIZE
; ++i
) {
1817 phys_page_for_each_1(client
, level
- 1, pp
+ i
,
1818 (addr
<< L2_BITS
) | i
, map
);
1823 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1826 struct last_map map
= { };
1828 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1829 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1830 l1_phys_map
+ i
, i
, &map
);
1833 client
->set_memory(client
, map
.start_addr
, map
.size
, map
.phys_offset
,
1838 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1840 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1841 phys_page_for_each(client
);
1844 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1846 QLIST_REMOVE(client
, list
);
1850 static int cmp1(const char *s1
, int n
, const char *s2
)
1852 if (strlen(s2
) != n
)
1854 return memcmp(s1
, s2
, n
) == 0;
1857 /* takes a comma separated list of log masks. Return 0 if error. */
1858 int cpu_str_to_log_mask(const char *str
)
1860 const CPULogItem
*item
;
1867 p1
= strchr(p
, ',');
1870 if(cmp1(p
,p1
-p
,"all")) {
1871 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1875 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1876 if (cmp1(p
, p1
- p
, item
->name
))
1890 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1897 fprintf(stderr
, "qemu: fatal: ");
1898 vfprintf(stderr
, fmt
, ap
);
1899 fprintf(stderr
, "\n");
1901 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1903 cpu_dump_state(env
, stderr
, fprintf
, 0);
1905 if (qemu_log_enabled()) {
1906 qemu_log("qemu: fatal: ");
1907 qemu_log_vprintf(fmt
, ap2
);
1910 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1912 log_cpu_state(env
, 0);
1919 #if defined(CONFIG_USER_ONLY)
1921 struct sigaction act
;
1922 sigfillset(&act
.sa_mask
);
1923 act
.sa_handler
= SIG_DFL
;
1924 sigaction(SIGABRT
, &act
, NULL
);
1930 CPUState
*cpu_copy(CPUState
*env
)
1932 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1933 CPUState
*next_cpu
= new_env
->next_cpu
;
1934 int cpu_index
= new_env
->cpu_index
;
1935 #if defined(TARGET_HAS_ICE)
1940 memcpy(new_env
, env
, sizeof(CPUState
));
1942 /* Preserve chaining and index. */
1943 new_env
->next_cpu
= next_cpu
;
1944 new_env
->cpu_index
= cpu_index
;
1946 /* Clone all break/watchpoints.
1947 Note: Once we support ptrace with hw-debug register access, make sure
1948 BP_CPU break/watchpoints are handled correctly on clone. */
1949 QTAILQ_INIT(&env
->breakpoints
);
1950 QTAILQ_INIT(&env
->watchpoints
);
1951 #if defined(TARGET_HAS_ICE)
1952 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1953 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1955 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1956 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1964 #if !defined(CONFIG_USER_ONLY)
1966 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1970 /* Discard jump cache entries for any tb which might potentially
1971 overlap the flushed page. */
1972 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1973 memset (&env
->tb_jmp_cache
[i
], 0,
1974 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1976 i
= tb_jmp_cache_hash_page(addr
);
1977 memset (&env
->tb_jmp_cache
[i
], 0,
1978 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1981 static CPUTLBEntry s_cputlb_empty_entry
= {
1988 /* NOTE: if flush_global is true, also flush global entries (not
1990 void tlb_flush(CPUState
*env
, int flush_global
)
1994 #if defined(DEBUG_TLB)
1995 printf("tlb_flush:\n");
1997 /* must reset current TB so that interrupts cannot modify the
1998 links while we are modifying them */
1999 env
->current_tb
= NULL
;
2001 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
2003 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2004 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
2008 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
2010 env
->tlb_flush_addr
= -1;
2011 env
->tlb_flush_mask
= 0;
2015 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
2017 if (addr
== (tlb_entry
->addr_read
&
2018 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
2019 addr
== (tlb_entry
->addr_write
&
2020 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
2021 addr
== (tlb_entry
->addr_code
&
2022 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
2023 *tlb_entry
= s_cputlb_empty_entry
;
2027 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2032 #if defined(DEBUG_TLB)
2033 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
2035 /* Check if we need to flush due to large pages. */
2036 if ((addr
& env
->tlb_flush_mask
) == env
->tlb_flush_addr
) {
2037 #if defined(DEBUG_TLB)
2038 printf("tlb_flush_page: forced full flush ("
2039 TARGET_FMT_lx
"/" TARGET_FMT_lx
")\n",
2040 env
->tlb_flush_addr
, env
->tlb_flush_mask
);
2045 /* must reset current TB so that interrupts cannot modify the
2046 links while we are modifying them */
2047 env
->current_tb
= NULL
;
2049 addr
&= TARGET_PAGE_MASK
;
2050 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2051 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2052 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
2054 tlb_flush_jmp_cache(env
, addr
);
2057 /* update the TLBs so that writes to code in the virtual page 'addr'
2059 static void tlb_protect_code(ram_addr_t ram_addr
)
2061 cpu_physical_memory_reset_dirty(ram_addr
,
2062 ram_addr
+ TARGET_PAGE_SIZE
,
2066 /* update the TLB so that writes in physical page 'phys_addr' are no longer
2067 tested for self modifying code */
2068 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
2071 cpu_physical_memory_set_dirty_flags(ram_addr
, CODE_DIRTY_FLAG
);
2074 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
2075 unsigned long start
, unsigned long length
)
2078 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2079 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
2080 if ((addr
- start
) < length
) {
2081 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
2086 /* Note: start and end must be within the same ram block. */
2087 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
2091 unsigned long length
, start1
;
2094 start
&= TARGET_PAGE_MASK
;
2095 end
= TARGET_PAGE_ALIGN(end
);
2097 length
= end
- start
;
2100 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
2102 /* we modify the TLB cache so that the dirty bit will be set again
2103 when accessing the range */
2104 start1
= (unsigned long)qemu_safe_ram_ptr(start
);
2105 /* Check that we don't span multiple blocks - this breaks the
2106 address comparisons below. */
2107 if ((unsigned long)qemu_safe_ram_ptr(end
- 1) - start1
2108 != (end
- 1) - start
) {
2112 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2114 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2115 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2116 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2122 int cpu_physical_memory_set_dirty_tracking(int enable
)
2125 in_migration
= enable
;
2126 ret
= cpu_notify_migration_log(!!enable
);
2130 int cpu_physical_memory_get_dirty_tracking(void)
2132 return in_migration
;
2135 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2136 target_phys_addr_t end_addr
)
2140 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2144 int cpu_physical_log_start(target_phys_addr_t start_addr
,
2147 CPUPhysMemoryClient
*client
;
2148 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2149 if (client
->log_start
) {
2150 int r
= client
->log_start(client
, start_addr
, size
);
2159 int cpu_physical_log_stop(target_phys_addr_t start_addr
,
2162 CPUPhysMemoryClient
*client
;
2163 QLIST_FOREACH(client
, &memory_client_list
, list
) {
2164 if (client
->log_stop
) {
2165 int r
= client
->log_stop(client
, start_addr
, size
);
2174 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2176 ram_addr_t ram_addr
;
2179 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2180 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2181 + tlb_entry
->addend
);
2182 ram_addr
= qemu_ram_addr_from_host_nofail(p
);
2183 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2184 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2189 /* update the TLB according to the current state of the dirty bits */
2190 void cpu_tlb_update_dirty(CPUState
*env
)
2194 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2195 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2196 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2200 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2202 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2203 tlb_entry
->addr_write
= vaddr
;
2206 /* update the TLB corresponding to virtual page vaddr
2207 so that it is no longer dirty */
2208 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2213 vaddr
&= TARGET_PAGE_MASK
;
2214 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2215 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2216 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2219 /* Our TLB does not support large pages, so remember the area covered by
2220 large pages and trigger a full TLB flush if these are invalidated. */
2221 static void tlb_add_large_page(CPUState
*env
, target_ulong vaddr
,
2224 target_ulong mask
= ~(size
- 1);
2226 if (env
->tlb_flush_addr
== (target_ulong
)-1) {
2227 env
->tlb_flush_addr
= vaddr
& mask
;
2228 env
->tlb_flush_mask
= mask
;
2231 /* Extend the existing region to include the new page.
2232 This is a compromise between unnecessary flushes and the cost
2233 of maintaining a full variable size TLB. */
2234 mask
&= env
->tlb_flush_mask
;
2235 while (((env
->tlb_flush_addr
^ vaddr
) & mask
) != 0) {
2238 env
->tlb_flush_addr
&= mask
;
2239 env
->tlb_flush_mask
= mask
;
2242 /* Add a new TLB entry. At most one entry for a given virtual address
2243 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2244 supplied size is only used by tlb_flush_page. */
2245 void tlb_set_page(CPUState
*env
, target_ulong vaddr
,
2246 target_phys_addr_t paddr
, int prot
,
2247 int mmu_idx
, target_ulong size
)
2252 target_ulong address
;
2253 target_ulong code_address
;
2254 unsigned long addend
;
2257 target_phys_addr_t iotlb
;
2259 assert(size
>= TARGET_PAGE_SIZE
);
2260 if (size
!= TARGET_PAGE_SIZE
) {
2261 tlb_add_large_page(env
, vaddr
, size
);
2263 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2265 pd
= IO_MEM_UNASSIGNED
;
2267 pd
= p
->phys_offset
;
2269 #if defined(DEBUG_TLB)
2270 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x" TARGET_FMT_plx
2271 " prot=%x idx=%d pd=0x%08lx\n",
2272 vaddr
, paddr
, prot
, mmu_idx
, pd
);
2276 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2277 /* IO memory case (romd handled later) */
2278 address
|= TLB_MMIO
;
2280 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2281 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2283 iotlb
= pd
& TARGET_PAGE_MASK
;
2284 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2285 iotlb
|= IO_MEM_NOTDIRTY
;
2287 iotlb
|= IO_MEM_ROM
;
2289 /* IO handlers are currently passed a physical address.
2290 It would be nice to pass an offset from the base address
2291 of that region. This would avoid having to special case RAM,
2292 and avoid full address decoding in every device.
2293 We can't use the high bits of pd for this because
2294 IO_MEM_ROMD uses these as a ram address. */
2295 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2297 iotlb
+= p
->region_offset
;
2303 code_address
= address
;
2304 /* Make accesses to pages with watchpoints go via the
2305 watchpoint trap routines. */
2306 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2307 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2308 /* Avoid trapping reads of pages with a write breakpoint. */
2309 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
2310 iotlb
= io_mem_watch
+ paddr
;
2311 address
|= TLB_MMIO
;
2317 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2318 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2319 te
= &env
->tlb_table
[mmu_idx
][index
];
2320 te
->addend
= addend
- vaddr
;
2321 if (prot
& PAGE_READ
) {
2322 te
->addr_read
= address
;
2327 if (prot
& PAGE_EXEC
) {
2328 te
->addr_code
= code_address
;
2332 if (prot
& PAGE_WRITE
) {
2333 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2334 (pd
& IO_MEM_ROMD
)) {
2335 /* Write access calls the I/O callback. */
2336 te
->addr_write
= address
| TLB_MMIO
;
2337 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2338 !cpu_physical_memory_is_dirty(pd
)) {
2339 te
->addr_write
= address
| TLB_NOTDIRTY
;
2341 te
->addr_write
= address
;
2344 te
->addr_write
= -1;
2350 void tlb_flush(CPUState
*env
, int flush_global
)
2354 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2359 * Walks guest process memory "regions" one by one
2360 * and calls callback function 'fn' for each region.
2363 struct walk_memory_regions_data
2365 walk_memory_regions_fn fn
;
2367 unsigned long start
;
2371 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2372 abi_ulong end
, int new_prot
)
2374 if (data
->start
!= -1ul) {
2375 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2381 data
->start
= (new_prot
? end
: -1ul);
2382 data
->prot
= new_prot
;
2387 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2388 abi_ulong base
, int level
, void **lp
)
2394 return walk_memory_regions_end(data
, base
, 0);
2399 for (i
= 0; i
< L2_SIZE
; ++i
) {
2400 int prot
= pd
[i
].flags
;
2402 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2403 if (prot
!= data
->prot
) {
2404 rc
= walk_memory_regions_end(data
, pa
, prot
);
2412 for (i
= 0; i
< L2_SIZE
; ++i
) {
2413 pa
= base
| ((abi_ulong
)i
<<
2414 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2415 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2425 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2427 struct walk_memory_regions_data data
;
2435 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2436 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
2437 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2443 return walk_memory_regions_end(&data
, 0, 0);
static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid. */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
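
/*
 * Illustrative sketch (not part of the build): how the page-flag helpers
 * above are typically combined by user-mode callers; 'guest_addr', 'len'
 * and 'prot' are hypothetical values.
 *
 *     // after a successful guest mmap(), record the new protection
 *     page_set_flags(guest_addr, guest_addr + len, PAGE_VALID | prot);
 *
 *     // before touching guest memory on its behalf, verify access
 *     if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
 *         return -1;    // not readable in the guest
 *     }
 *     int flags = page_get_flags(guest_addr);   // per-page PAGE_* bits
 */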
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok. */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
2778 #if defined(__linux__) && !defined(TARGET_S390X)
2780 #include <sys/vfs.h>
2782 #define HUGETLBFS_MAGIC 0x958458f6
2784 static long gethugepagesize(const char *path
)
2790 ret
= statfs(path
, &fs
);
2791 } while (ret
!= 0 && errno
== EINTR
);
2798 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2799 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2804 static void *file_ram_alloc(RAMBlock
*block
,
2814 unsigned long hpagesize
;
2816 hpagesize
= gethugepagesize(path
);
2821 if (memory
< hpagesize
) {
2825 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2826 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2830 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2834 fd
= mkstemp(filename
);
2836 perror("unable to create backing store for hugepages");
2843 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2846 * ftruncate is not supported by hugetlbfs in older
2847 * hosts, so don't bother bailing out on errors.
2848 * If anything goes wrong with it under other filesystems,
2851 if (ftruncate(fd
, memory
))
2852 perror("ftruncate");
2855 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2856 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2857 * to sidestep this quirk.
2859 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2860 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2862 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2864 if (area
== MAP_FAILED
) {
2865 perror("file_ram_alloc: can't mmap RAM pages");
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of data
               segment (system break) and this value.  We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size,
                          MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL, mr);
}
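
/*
 * Illustrative sketch (not part of the build): a device model usually
 * allocates its backing RAM with qemu_ram_alloc() and only then obtains a
 * host pointer; the name, size and MemoryRegion 'mr' are hypothetical.
 *
 *     ram_addr_t vram_offset = qemu_ram_alloc(NULL, "mydev.vram", 0x100000, mr);
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 */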
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
3057 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
3064 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
3065 offset
= addr
- block
->offset
;
3066 if (offset
< block
->length
) {
3067 vaddr
= block
->host
+ offset
;
3068 if (block
->flags
& RAM_PREALLOC_MASK
) {
3072 munmap(vaddr
, length
);
3074 #if defined(__linux__) && !defined(TARGET_S390X)
3077 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
3080 flags
|= MAP_PRIVATE
;
3082 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3083 flags
, block
->fd
, offset
);
3085 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
3086 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3093 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
3094 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
3095 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
3098 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
3099 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
3103 if (area
!= vaddr
) {
3104 fprintf(stderr
, "Could not remap addr: "
3105 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
3109 qemu_madvise(vaddr
, length
, QEMU_MADV_MERGEABLE
);
3115 #endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
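
/*
 * Illustrative sketch (not part of the build): when a caller may touch less
 * than a whole block, the length-checked variant qemu_ram_ptr_length()
 * further below is the safer choice; 'offset', 'data' and 'len' are
 * hypothetical.
 *
 *     ram_addr_t avail = len;
 *     void *host = qemu_ram_ptr_length(offset, &avail);
 *     memcpy(host, data, MIN(len, avail));
 */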
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    RAMBlock *block;

    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (addr - block->offset + *size > block->length)
                *size = block->length - addr + block->offset;
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This can happen when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
3256 static uint32_t unassigned_mem_readb(void *opaque
, target_phys_addr_t addr
)
3258 #ifdef DEBUG_UNASSIGNED
3259 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3261 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3262 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, 1);
3267 static uint32_t unassigned_mem_readw(void *opaque
, target_phys_addr_t addr
)
3269 #ifdef DEBUG_UNASSIGNED
3270 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3272 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3273 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, 2);
3278 static uint32_t unassigned_mem_readl(void *opaque
, target_phys_addr_t addr
)
3280 #ifdef DEBUG_UNASSIGNED
3281 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
3283 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3284 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, 4);
3289 static void unassigned_mem_writeb(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3291 #ifdef DEBUG_UNASSIGNED
3292 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3294 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3295 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, 1);
3299 static void unassigned_mem_writew(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3301 #ifdef DEBUG_UNASSIGNED
3302 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3304 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3305 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, 2);
3309 static void unassigned_mem_writel(void *opaque
, target_phys_addr_t addr
, uint32_t val
)
3311 #ifdef DEBUG_UNASSIGNED
3312 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%x\n", addr
, val
);
3314 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3315 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, 4);
3319 static CPUReadMemoryFunc
* const unassigned_mem_read
[3] = {
3320 unassigned_mem_readb
,
3321 unassigned_mem_readw
,
3322 unassigned_mem_readl
,
3325 static CPUWriteMemoryFunc
* const unassigned_mem_write
[3] = {
3326 unassigned_mem_writeb
,
3327 unassigned_mem_writew
,
3328 unassigned_mem_writel
,
3331 static void notdirty_mem_writeb(void *opaque
, target_phys_addr_t ram_addr
,
3335 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3336 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3337 #if !defined(CONFIG_USER_ONLY)
3338 tb_invalidate_phys_page_fast(ram_addr
, 1);
3339 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3342 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
3343 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3344 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3345 /* we remove the notdirty callback only if the code has been
3347 if (dirty_flags
== 0xff)
3348 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3351 static void notdirty_mem_writew(void *opaque
, target_phys_addr_t ram_addr
,
3355 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3356 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3357 #if !defined(CONFIG_USER_ONLY)
3358 tb_invalidate_phys_page_fast(ram_addr
, 2);
3359 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3362 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
3363 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3364 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3365 /* we remove the notdirty callback only if the code has been
3367 if (dirty_flags
== 0xff)
3368 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3371 static void notdirty_mem_writel(void *opaque
, target_phys_addr_t ram_addr
,
3375 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3376 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
3377 #if !defined(CONFIG_USER_ONLY)
3378 tb_invalidate_phys_page_fast(ram_addr
, 4);
3379 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
3382 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
3383 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
3384 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
3385 /* we remove the notdirty callback only if the code has been
3387 if (dirty_flags
== 0xff)
3388 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
3391 static CPUReadMemoryFunc
* const error_mem_read
[3] = {
3392 NULL
, /* never used */
3393 NULL
, /* never used */
3394 NULL
, /* never used */
3397 static CPUWriteMemoryFunc
* const notdirty_mem_write
[3] = {
3398 notdirty_mem_writeb
,
3399 notdirty_mem_writew
,
3400 notdirty_mem_writel
,
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
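
/*
 * Illustrative sketch (not part of the build): check_watchpoint() only fires
 * for watchpoints that were previously registered on the CPU, e.g. by the
 * gdbstub; 'addr' and 'len' are hypothetical.
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, addr, len, BP_MEM_WRITE | BP_GDB, &wp);
 *
 * The TLB trick described below then routes accesses to that page through
 * io_mem_watch so the check above runs on every access.
 */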
3448 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3449 so these check for a hit then pass through to the normal out-of-line
3451 static uint32_t watch_mem_readb(void *opaque
, target_phys_addr_t addr
)
3453 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_READ
);
3454 return ldub_phys(addr
);
3457 static uint32_t watch_mem_readw(void *opaque
, target_phys_addr_t addr
)
3459 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_READ
);
3460 return lduw_phys(addr
);
3463 static uint32_t watch_mem_readl(void *opaque
, target_phys_addr_t addr
)
3465 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_READ
);
3466 return ldl_phys(addr
);
3469 static void watch_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3472 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x0, BP_MEM_WRITE
);
3473 stb_phys(addr
, val
);
3476 static void watch_mem_writew(void *opaque
, target_phys_addr_t addr
,
3479 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x1, BP_MEM_WRITE
);
3480 stw_phys(addr
, val
);
3483 static void watch_mem_writel(void *opaque
, target_phys_addr_t addr
,
3486 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~0x3, BP_MEM_WRITE
);
3487 stl_phys(addr
, val
);
3490 static CPUReadMemoryFunc
* const watch_mem_read
[3] = {
3496 static CPUWriteMemoryFunc
* const watch_mem_write
[3] = {
3502 static inline uint32_t subpage_readlen (subpage_t
*mmio
,
3503 target_phys_addr_t addr
,
3506 unsigned int idx
= SUBPAGE_IDX(addr
);
3507 #if defined(DEBUG_SUBPAGE)
3508 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3509 mmio
, len
, addr
, idx
);
3512 addr
+= mmio
->region_offset
[idx
];
3513 idx
= mmio
->sub_io_index
[idx
];
3514 return io_mem_read
[idx
][len
](io_mem_opaque
[idx
], addr
);
3517 static inline void subpage_writelen (subpage_t
*mmio
, target_phys_addr_t addr
,
3518 uint32_t value
, unsigned int len
)
3520 unsigned int idx
= SUBPAGE_IDX(addr
);
3521 #if defined(DEBUG_SUBPAGE)
3522 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d value %08x\n",
3523 __func__
, mmio
, len
, addr
, idx
, value
);
3526 addr
+= mmio
->region_offset
[idx
];
3527 idx
= mmio
->sub_io_index
[idx
];
3528 io_mem_write
[idx
][len
](io_mem_opaque
[idx
], addr
, value
);
3531 static uint32_t subpage_readb (void *opaque
, target_phys_addr_t addr
)
3533 return subpage_readlen(opaque
, addr
, 0);
3536 static void subpage_writeb (void *opaque
, target_phys_addr_t addr
,
3539 subpage_writelen(opaque
, addr
, value
, 0);
3542 static uint32_t subpage_readw (void *opaque
, target_phys_addr_t addr
)
3544 return subpage_readlen(opaque
, addr
, 1);
3547 static void subpage_writew (void *opaque
, target_phys_addr_t addr
,
3550 subpage_writelen(opaque
, addr
, value
, 1);
3553 static uint32_t subpage_readl (void *opaque
, target_phys_addr_t addr
)
3555 return subpage_readlen(opaque
, addr
, 2);
3558 static void subpage_writel (void *opaque
, target_phys_addr_t addr
,
3561 subpage_writelen(opaque
, addr
, value
, 2);
3564 static CPUReadMemoryFunc
* const subpage_read
[] = {
3570 static CPUWriteMemoryFunc
* const subpage_write
[] = {
3576 static uint32_t subpage_ram_readb(void *opaque
, target_phys_addr_t addr
)
3578 ram_addr_t raddr
= addr
;
3579 void *ptr
= qemu_get_ram_ptr(raddr
);
3583 static void subpage_ram_writeb(void *opaque
, target_phys_addr_t addr
,
3586 ram_addr_t raddr
= addr
;
3587 void *ptr
= qemu_get_ram_ptr(raddr
);
3591 static uint32_t subpage_ram_readw(void *opaque
, target_phys_addr_t addr
)
3593 ram_addr_t raddr
= addr
;
3594 void *ptr
= qemu_get_ram_ptr(raddr
);
3598 static void subpage_ram_writew(void *opaque
, target_phys_addr_t addr
,
3601 ram_addr_t raddr
= addr
;
3602 void *ptr
= qemu_get_ram_ptr(raddr
);
3606 static uint32_t subpage_ram_readl(void *opaque
, target_phys_addr_t addr
)
3608 ram_addr_t raddr
= addr
;
3609 void *ptr
= qemu_get_ram_ptr(raddr
);
3613 static void subpage_ram_writel(void *opaque
, target_phys_addr_t addr
,
3616 ram_addr_t raddr
= addr
;
3617 void *ptr
= qemu_get_ram_ptr(raddr
);
3621 static CPUReadMemoryFunc
* const subpage_ram_read
[] = {
3627 static CPUWriteMemoryFunc
* const subpage_ram_write
[] = {
3628 &subpage_ram_writeb
,
3629 &subpage_ram_writew
,
3630 &subpage_ram_writel
,
3633 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3634 ram_addr_t memory
, ram_addr_t region_offset
)
3638 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3640 idx
= SUBPAGE_IDX(start
);
3641 eidx
= SUBPAGE_IDX(end
);
3642 #if defined(DEBUG_SUBPAGE)
3643 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3644 mmio
, start
, end
, idx
, eidx
, memory
);
3646 if ((memory
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
3647 memory
= IO_MEM_SUBPAGE_RAM
;
3649 memory
= (memory
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
3650 for (; idx
<= eidx
; idx
++) {
3651 mmio
->sub_io_index
[idx
] = memory
;
3652 mmio
->region_offset
[idx
] = region_offset
;
3658 static subpage_t
*subpage_init (target_phys_addr_t base
, ram_addr_t
*phys
,
3659 ram_addr_t orig_memory
,
3660 ram_addr_t region_offset
)
3665 mmio
= g_malloc0(sizeof(subpage_t
));
3668 subpage_memory
= cpu_register_io_memory(subpage_read
, subpage_write
, mmio
,
3669 DEVICE_NATIVE_ENDIAN
);
3670 #if defined(DEBUG_SUBPAGE)
3671 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3672 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3674 *phys
= subpage_memory
| IO_MEM_SUBPAGE
;
3675 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, orig_memory
, region_offset
);
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}
3694 * Usually, devices operate in little endian mode. There are devices out
3695 * there that operate in big endian too. Each device gets byte swapped
3696 * mmio if plugged onto a CPU that does the other endianness.
3706 typedef struct SwapEndianContainer
{
3707 CPUReadMemoryFunc
*read
[3];
3708 CPUWriteMemoryFunc
*write
[3];
3710 } SwapEndianContainer
;
3712 static uint32_t swapendian_mem_readb (void *opaque
, target_phys_addr_t addr
)
3715 SwapEndianContainer
*c
= opaque
;
3716 val
= c
->read
[0](c
->opaque
, addr
);
3720 static uint32_t swapendian_mem_readw(void *opaque
, target_phys_addr_t addr
)
3723 SwapEndianContainer
*c
= opaque
;
3724 val
= bswap16(c
->read
[1](c
->opaque
, addr
));
3728 static uint32_t swapendian_mem_readl(void *opaque
, target_phys_addr_t addr
)
3731 SwapEndianContainer
*c
= opaque
;
3732 val
= bswap32(c
->read
[2](c
->opaque
, addr
));
3736 static CPUReadMemoryFunc
* const swapendian_readfn
[3]={
3737 swapendian_mem_readb
,
3738 swapendian_mem_readw
,
3739 swapendian_mem_readl
3742 static void swapendian_mem_writeb(void *opaque
, target_phys_addr_t addr
,
3745 SwapEndianContainer
*c
= opaque
;
3746 c
->write
[0](c
->opaque
, addr
, val
);
3749 static void swapendian_mem_writew(void *opaque
, target_phys_addr_t addr
,
3752 SwapEndianContainer
*c
= opaque
;
3753 c
->write
[1](c
->opaque
, addr
, bswap16(val
));
3756 static void swapendian_mem_writel(void *opaque
, target_phys_addr_t addr
,
3759 SwapEndianContainer
*c
= opaque
;
3760 c
->write
[2](c
->opaque
, addr
, bswap32(val
));
3763 static CPUWriteMemoryFunc
* const swapendian_writefn
[3]={
3764 swapendian_mem_writeb
,
3765 swapendian_mem_writew
,
3766 swapendian_mem_writel
3769 static void swapendian_init(int io_index
)
3771 SwapEndianContainer
*c
= g_malloc(sizeof(SwapEndianContainer
));
3774 /* Swap mmio for big endian targets */
3775 c
->opaque
= io_mem_opaque
[io_index
];
3776 for (i
= 0; i
< 3; i
++) {
3777 c
->read
[i
] = io_mem_read
[io_index
][i
];
3778 c
->write
[i
] = io_mem_write
[io_index
][i
];
3780 io_mem_read
[io_index
][i
] = swapendian_readfn
[i
];
3781 io_mem_write
[io_index
][i
] = swapendian_writefn
[i
];
3783 io_mem_opaque
[io_index
] = c
;
3786 static void swapendian_del(int io_index
)
3788 if (io_mem_read
[io_index
][0] == swapendian_readfn
[0]) {
3789 g_free(io_mem_opaque
[io_index
]);
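
/*
 * Worked example (illustrative): with a big-endian target and a
 * DEVICE_LITTLE_ENDIAN device, a 32-bit register read that returns
 * 0x12345678 from the device callback goes through swapendian_mem_readl()
 * above, so the value is byte-swapped (bswap32(0x12345678) == 0x78563412)
 * before it reaches the core memory code; that swap is what compensates
 * for the device/target endianness mismatch.
 */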
3793 /* mem_read and mem_write are arrays of functions containing the
3794 function to access byte (index 0), word (index 1) and dword (index
3795 2). Functions can be omitted with a NULL function pointer.
3796 If io_index is non zero, the corresponding io zone is
3797 modified. If it is zero, a new io zone is allocated. The return
3798 value can be used with cpu_register_physical_memory(). (-1) is
3799 returned if error. */
3800 static int cpu_register_io_memory_fixed(int io_index
,
3801 CPUReadMemoryFunc
* const *mem_read
,
3802 CPUWriteMemoryFunc
* const *mem_write
,
3803 void *opaque
, enum device_endian endian
)
3807 if (io_index
<= 0) {
3808 io_index
= get_free_io_mem_idx();
3812 io_index
>>= IO_MEM_SHIFT
;
3813 if (io_index
>= IO_MEM_NB_ENTRIES
)
3817 for (i
= 0; i
< 3; ++i
) {
3818 io_mem_read
[io_index
][i
]
3819 = (mem_read
[i
] ? mem_read
[i
] : unassigned_mem_read
[i
]);
3821 for (i
= 0; i
< 3; ++i
) {
3822 io_mem_write
[io_index
][i
]
3823 = (mem_write
[i
] ? mem_write
[i
] : unassigned_mem_write
[i
]);
3825 io_mem_opaque
[io_index
] = opaque
;
3828 case DEVICE_BIG_ENDIAN
:
3829 #ifndef TARGET_WORDS_BIGENDIAN
3830 swapendian_init(io_index
);
3833 case DEVICE_LITTLE_ENDIAN
:
3834 #ifdef TARGET_WORDS_BIGENDIAN
3835 swapendian_init(io_index
);
3838 case DEVICE_NATIVE_ENDIAN
:
3843 return (io_index
<< IO_MEM_SHIFT
);
3846 int cpu_register_io_memory(CPUReadMemoryFunc
* const *mem_read
,
3847 CPUWriteMemoryFunc
* const *mem_write
,
3848 void *opaque
, enum device_endian endian
)
3850 return cpu_register_io_memory_fixed(0, mem_read
, mem_write
, opaque
, endian
);
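
/*
 * Illustrative sketch (not part of the build): a device registers its MMIO
 * callbacks and then maps them at a guest-physical address.  The handler
 * names, the opaque pointer 's' and the base address are hypothetical.
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(mydev_read, mydev_write, s,
 *                                     DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory_log(0x10000000, 0x1000, io, 0, false);
 */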
3853 void cpu_unregister_io_memory(int io_table_address
)
3856 int io_index
= io_table_address
>> IO_MEM_SHIFT
;
3858 swapendian_del(io_index
);
3860 for (i
=0;i
< 3; i
++) {
3861 io_mem_read
[io_index
][i
] = unassigned_mem_read
[i
];
3862 io_mem_write
[io_index
][i
] = unassigned_mem_write
[i
];
3864 io_mem_opaque
[io_index
] = NULL
;
3865 io_mem_used
[io_index
] = 0;
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
                                 subpage_ram_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
3915 /* physical memory access (slow version, mainly for debug) */
3916 #if defined(CONFIG_USER_ONLY)
3917 int cpu_memory_rw_debug(CPUState
*env
, target_ulong addr
,
3918 uint8_t *buf
, int len
, int is_write
)
3925 page
= addr
& TARGET_PAGE_MASK
;
3926 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3929 flags
= page_get_flags(page
);
3930 if (!(flags
& PAGE_VALID
))
3933 if (!(flags
& PAGE_WRITE
))
3935 /* XXX: this code should not depend on lock_user */
3936 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3939 unlock_user(p
, addr
, l
);
3941 if (!(flags
& PAGE_READ
))
3943 /* XXX: this code should not depend on lock_user */
3944 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3947 unlock_user(p
, addr
, 0);
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
4055 /* used for ROM loading : can write in RAM and ROM */
4056 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
4057 const uint8_t *buf
, int len
)
4061 target_phys_addr_t page
;
4066 page
= addr
& TARGET_PAGE_MASK
;
4067 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
4070 p
= phys_page_find(page
>> TARGET_PAGE_BITS
);
4072 pd
= IO_MEM_UNASSIGNED
;
4074 pd
= p
->phys_offset
;
4077 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
&&
4078 (pd
& ~TARGET_PAGE_MASK
) != IO_MEM_ROM
&&
4079 !(pd
& IO_MEM_ROMD
)) {
4082 unsigned long addr1
;
4083 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4085 ptr
= qemu_get_ram_ptr(addr1
);
4086 memcpy(ptr
, buf
, l
);
4087 qemu_put_ram_ptr(ptr
);
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
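
/*
 * Illustrative sketch (not part of the build): the intended map/unmap pairing
 * for a DMA-style read of guest memory; 'gpa', 'want', 'process', 'retry_cb'
 * and 's' are hypothetical.
 *
 *     target_phys_addr_t plen = want;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 0);   // 0 = read
 *     if (host) {
 *         process(host, plen);               // may be shorter than 'want'
 *         cpu_physical_memory_unmap(host, plen, 0, plen);
 *     } else {
 *         cpu_register_map_client(s, retry_cb);  // retry when resources free up
 *     }
 */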
4241 /* warning: addr must be aligned */
4242 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
4243 enum device_endian endian
)
4251 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4253 pd
= IO_MEM_UNASSIGNED
;
4255 pd
= p
->phys_offset
;
4258 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4259 !(pd
& IO_MEM_ROMD
)) {
4261 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4263 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4264 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
4265 #if defined(TARGET_WORDS_BIGENDIAN)
4266 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4270 if (endian
== DEVICE_BIG_ENDIAN
) {
4276 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4277 (addr
& ~TARGET_PAGE_MASK
);
4279 case DEVICE_LITTLE_ENDIAN
:
4280 val
= ldl_le_p(ptr
);
4282 case DEVICE_BIG_ENDIAN
:
4283 val
= ldl_be_p(ptr
);
4293 uint32_t ldl_phys(target_phys_addr_t addr
)
4295 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4298 uint32_t ldl_le_phys(target_phys_addr_t addr
)
4300 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4303 uint32_t ldl_be_phys(target_phys_addr_t addr
)
4305 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4308 /* warning: addr must be aligned */
4309 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
4310 enum device_endian endian
)
4318 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4320 pd
= IO_MEM_UNASSIGNED
;
4322 pd
= p
->phys_offset
;
4325 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4326 !(pd
& IO_MEM_ROMD
)) {
4328 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4330 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4332 /* XXX This is broken when device endian != cpu endian.
4333 Fix and add "endian" variable check */
4334 #ifdef TARGET_WORDS_BIGENDIAN
4335 val
= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
) << 32;
4336 val
|= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4);
4338 val
= io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
);
4339 val
|= (uint64_t)io_mem_read
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4) << 32;
4343 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4344 (addr
& ~TARGET_PAGE_MASK
);
4346 case DEVICE_LITTLE_ENDIAN
:
4347 val
= ldq_le_p(ptr
);
4349 case DEVICE_BIG_ENDIAN
:
4350 val
= ldq_be_p(ptr
);
4360 uint64_t ldq_phys(target_phys_addr_t addr
)
4362 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4365 uint64_t ldq_le_phys(target_phys_addr_t addr
)
4367 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4370 uint64_t ldq_be_phys(target_phys_addr_t addr
)
4372 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4376 uint32_t ldub_phys(target_phys_addr_t addr
)
4379 cpu_physical_memory_read(addr
, &val
, 1);
4383 /* warning: addr must be aligned */
4384 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
4385 enum device_endian endian
)
4393 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4395 pd
= IO_MEM_UNASSIGNED
;
4397 pd
= p
->phys_offset
;
4400 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&&
4401 !(pd
& IO_MEM_ROMD
)) {
4403 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4405 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4406 val
= io_mem_read
[io_index
][1](io_mem_opaque
[io_index
], addr
);
4407 #if defined(TARGET_WORDS_BIGENDIAN)
4408 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4412 if (endian
== DEVICE_BIG_ENDIAN
) {
4418 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4419 (addr
& ~TARGET_PAGE_MASK
);
4421 case DEVICE_LITTLE_ENDIAN
:
4422 val
= lduw_le_p(ptr
);
4424 case DEVICE_BIG_ENDIAN
:
4425 val
= lduw_be_p(ptr
);
4435 uint32_t lduw_phys(target_phys_addr_t addr
)
4437 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
4440 uint32_t lduw_le_phys(target_phys_addr_t addr
)
4442 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
4445 uint32_t lduw_be_phys(target_phys_addr_t addr
)
4447 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
4450 /* warning: addr must be aligned. The ram page is not masked as dirty
4451 and the code inside is not invalidated. It is useful if the dirty
4452 bits are used to track modified PTEs */
4453 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
4460 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4462 pd
= IO_MEM_UNASSIGNED
;
4464 pd
= p
->phys_offset
;
4467 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4468 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4470 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4471 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4473 unsigned long addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4474 ptr
= qemu_get_ram_ptr(addr1
);
4477 if (unlikely(in_migration
)) {
4478 if (!cpu_physical_memory_is_dirty(addr1
)) {
4479 /* invalidate code */
4480 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4482 cpu_physical_memory_set_dirty_flags(
4483 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
4489 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
4496 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4498 pd
= IO_MEM_UNASSIGNED
;
4500 pd
= p
->phys_offset
;
4503 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4504 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4506 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4507 #ifdef TARGET_WORDS_BIGENDIAN
4508 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
>> 32);
4509 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
);
4511 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4512 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
+ 4, val
>> 32);
4515 ptr
= qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
) +
4516 (addr
& ~TARGET_PAGE_MASK
);
4521 /* warning: addr must be aligned */
4522 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4523 enum device_endian endian
)
4530 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4532 pd
= IO_MEM_UNASSIGNED
;
4534 pd
= p
->phys_offset
;
4537 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4538 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4540 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4541 #if defined(TARGET_WORDS_BIGENDIAN)
4542 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4546 if (endian
== DEVICE_BIG_ENDIAN
) {
4550 io_mem_write
[io_index
][2](io_mem_opaque
[io_index
], addr
, val
);
4552 unsigned long addr1
;
4553 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4555 ptr
= qemu_get_ram_ptr(addr1
);
4557 case DEVICE_LITTLE_ENDIAN
:
4560 case DEVICE_BIG_ENDIAN
:
4567 if (!cpu_physical_memory_is_dirty(addr1
)) {
4568 /* invalidate code */
4569 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
4571 cpu_physical_memory_set_dirty_flags(addr1
,
4572 (0xff & ~CODE_DIRTY_FLAG
));
4577 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
4579 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4582 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
4584 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4587 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
4589 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
4593 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
4596 cpu_physical_memory_write(addr
, &v
, 1);
4599 /* warning: addr must be aligned */
4600 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
4601 enum device_endian endian
)
4608 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
4610 pd
= IO_MEM_UNASSIGNED
;
4612 pd
= p
->phys_offset
;
4615 if ((pd
& ~TARGET_PAGE_MASK
) != IO_MEM_RAM
) {
4616 io_index
= (pd
>> IO_MEM_SHIFT
) & (IO_MEM_NB_ENTRIES
- 1);
4618 addr
= (addr
& ~TARGET_PAGE_MASK
) + p
->region_offset
;
4619 #if defined(TARGET_WORDS_BIGENDIAN)
4620 if (endian
== DEVICE_LITTLE_ENDIAN
) {
4624 if (endian
== DEVICE_BIG_ENDIAN
) {
4628 io_mem_write
[io_index
][1](io_mem_opaque
[io_index
], addr
, val
);
4630 unsigned long addr1
;
4631 addr1
= (pd
& TARGET_PAGE_MASK
) + (addr
& ~TARGET_PAGE_MASK
);
4633 ptr
= qemu_get_ram_ptr(addr1
);
4635 case DEVICE_LITTLE_ENDIAN
:
4638 case DEVICE_BIG_ENDIAN
:
4645 if (!cpu_physical_memory_is_dirty(addr1
)) {
4646 /* invalidate code */
4647 tb_invalidate_phys_page_range(addr1
, addr1
+ 2, 0);
4649 cpu_physical_memory_set_dirty_flags(addr1
,
4650 (0xff & ~CODE_DIRTY_FLAG
));
4655 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
4657 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
4660 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
4662 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
4665 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
4667 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
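
/*
 * Illustrative sketch (not part of the build): device code uses the
 * endian-explicit helpers to read and update an in-guest-memory descriptor
 * that is defined as little-endian regardless of the target CPU; 'desc_pa'
 * and 'buffer_pa' are hypothetical guest-physical addresses.
 *
 *     uint32_t status = ldl_le_phys(desc_pa + 4);
 *     stl_le_phys(desc_pa + 4, status | 0x1);
 *     stq_le_phys(desc_pa + 8, buffer_pa);
 */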
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
4719 /* in deterministic execution mode, instructions doing device I/Os
4720 must be at the end of the TB */
4721 void cpu_io_recompile(CPUState
*env
, void *retaddr
)
4723 TranslationBlock
*tb
;
4725 target_ulong pc
, cs_base
;
4728 tb
= tb_find_pc((unsigned long)retaddr
);
4730 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4733 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4734 cpu_restore_state(tb
, env
, (unsigned long)retaddr
);
4735 /* Calculate how many instructions had been executed before the fault
4737 n
= n
- env
->icount_decr
.u16
.low
;
4738 /* Generate a new TB ending on the I/O insn. */
4740 /* On MIPS and SH, delay slot instructions can only be restarted if
4741 they were already the first instruction in the TB. If this is not
4742 the first instruction in a TB then re-execute the preceding
4744 #if defined(TARGET_MIPS)
4745 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4746 env
->active_tc
.PC
-= 4;
4747 env
->icount_decr
.u16
.low
++;
4748 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4750 #elif defined(TARGET_SH4)
4751 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4754 env
->icount_decr
.u16
.low
++;
4755 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4758 /* This should never happen. */
4759 if (n
> CF_COUNT_MASK
)
4760 cpu_abort(env
, "TB too big during recompile");
4762 cflags
= n
| CF_LAST_IO
;
4764 cs_base
= tb
->cs_base
;
4766 tb_phys_invalidate(tb
, -1);
4767 /* FIXME: In theory this could raise an exception. In practice
4768 we have already translated the block once so it's probably ok. */
4769 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4770 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4771 the first in the TB) then we end up generating a whole new TB and
4772 repeating the fault, which is horribly inefficient.
4773 Better would be to execute just this insn uncached, or generate a
4775 cpu_resume_from_signal(env
, NULL
);
4778 #if !defined(CONFIG_USER_ONLY)
4780 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4782 int i
, target_code_size
, max_target_code_size
;
4783 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4784 TranslationBlock
*tb
;
4786 target_code_size
= 0;
4787 max_target_code_size
= 0;
4789 direct_jmp_count
= 0;
4790 direct_jmp2_count
= 0;
4791 for(i
= 0; i
< nb_tbs
; i
++) {
4793 target_code_size
+= tb
->size
;
4794 if (tb
->size
> max_target_code_size
)
4795 max_target_code_size
= tb
->size
;
4796 if (tb
->page_addr
[1] != -1)
4798 if (tb
->tb_next_offset
[0] != 0xffff) {
4800 if (tb
->tb_next_offset
[1] != 0xffff) {
4801 direct_jmp2_count
++;
4805 /* XXX: avoid using doubles ? */
4806 cpu_fprintf(f
, "Translation buffer state:\n");
4807 cpu_fprintf(f
, "gen code size %td/%ld\n",
4808 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4809 cpu_fprintf(f
, "TB count %d/%d\n",
4810 nb_tbs
, code_gen_max_blocks
);
4811 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4812 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4813 max_target_code_size
);
4814 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4815 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4816 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4817 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4819 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4820 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4822 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4824 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4825 cpu_fprintf(f
, "\nStatistics:\n");
4826 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4827 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4828 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4829 tcg_dump_info(f
, cpu_fprintf
);
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"